diff --git a/api/routers/generation.py b/api/routers/generation.py
index 8bdde97..b5c99ae 100644
--- a/api/routers/generation.py
+++ b/api/routers/generation.py
@@ -2,7 +2,7 @@
import threading
import traceback
import uuid
-from typing import Dict
+from typing import Dict, List
from fastapi import APIRouter, File, Form, UploadFile, HTTPException, BackgroundTasks
from services.generators.base import smooth_progress
@@ -18,7 +18,8 @@
@router.post("/from-image")
async def generate_from_image(
background_tasks: BackgroundTasks,
- image: UploadFile = File(...),
+ image: List[UploadFile] = File(...),
+ view_labels: str = Form(""),
model_id: str = Form("sf3d"),
collection: str = Form("Default"),
vertex_count: int = Form(10000),
@@ -30,8 +31,9 @@ async def generate_from_image(
seed: int = Form(-1),
num_inference_steps: int = Form(30),
):
- if not image.content_type or not image.content_type.startswith("image/"):
- raise HTTPException(400, "File must be an image")
+ for img in image:
+ if not img.content_type or not img.content_type.startswith("image/"):
+ raise HTTPException(400, "All files must be images")
if remesh not in ("quad", "triangle", "none"):
raise HTTPException(400, "remesh must be 'quad', 'triangle', or 'none'")
@@ -56,8 +58,12 @@ async def generate_from_image(
generator_registry.switch_model(model_id)
- job_id = str(uuid.uuid4())
- image_bytes = await image.read()
+ job_id = str(uuid.uuid4())
+ image_bytes_list = [await img.read() for img in image]
+ # Pass single bytes for backward compat, list for multi-view
+ image_data = image_bytes_list[0] if len(image_bytes_list) == 1 else image_bytes_list
+ # Parse view labels (e.g. "front,back" → ["front", "back"])
+ parsed_view_labels = [v.strip() for v in view_labels.split(",") if v.strip()] if view_labels else []
params = {
"vertex_count": vertex_count,
"remesh": remesh,
@@ -67,12 +73,13 @@ async def generate_from_image(
"guidance_scale": guidance_scale,
"seed": seed,
"num_inference_steps": num_inference_steps,
+ "view_labels": parsed_view_labels,
}
job = JobStatus(job_id=job_id, status="pending", progress=0)
_jobs[job_id] = job
- background_tasks.add_task(_run_generation, job_id, image_bytes, params, collection)
+ background_tasks.add_task(_run_generation, job_id, image_data, params, collection)
return {"job_id": job_id}
@@ -86,7 +93,7 @@ async def job_status(job_id: str):
return job
-async def _run_generation(job_id: str, image_bytes: bytes, params: dict, collection: str = "Default") -> None:
+async def _run_generation(job_id: str, image_bytes, params: dict, collection: str = "Default") -> None:
job = _jobs[job_id]
job.status = "running"
diff --git a/api/services/generators/base.py b/api/services/generators/base.py
index fdcb32a..fd2b420 100644
--- a/api/services/generators/base.py
+++ b/api/services/generators/base.py
@@ -4,7 +4,7 @@
from abc import ABC, abstractmethod
import threading
from pathlib import Path
-from typing import Callable, Optional
+from typing import Callable, List, Optional, Union
def smooth_progress(
@@ -84,12 +84,13 @@ def is_loaded(self) -> bool:
@abstractmethod
def generate(
self,
- image_bytes: bytes,
+ image_bytes: Union[bytes, List[bytes]],
params: dict,
progress_cb: Optional[Callable[[int, str], None]] = None,
) -> Path:
"""
- Starts 3D generation from an image.
+ Starts 3D generation from one or more images.
+    Pass a single ``bytes`` object for single-view, or a list of ``bytes`` for multi-view.
Returns the path to the generated .glb file.
progress_cb(percent: int, step_label: str)
"""
diff --git a/api/services/generators/hunyuan3d.py b/api/services/generators/hunyuan3d.py
index 2a2b247..4205d9b 100644
--- a/api/services/generators/hunyuan3d.py
+++ b/api/services/generators/hunyuan3d.py
@@ -19,7 +19,7 @@
import uuid
import zipfile
from pathlib import Path
-from typing import Callable, Optional
+from typing import Callable, List, Optional, Union
from PIL import Image
@@ -84,7 +84,7 @@ def unload(self) -> None:
def generate(
self,
- image_bytes: bytes,
+ image_bytes: Union[bytes, List[bytes]],
params: dict,
progress_cb: Optional[Callable[[int, str], None]] = None,
) -> Path:
@@ -93,9 +93,23 @@ def generate(
num_steps = int(params.get("num_inference_steps", 50))
vert_count = int(params.get("vertex_count", 0))
- # Step 1 — background removal
- self._report(progress_cb, 5, "Removing background…")
- image = self._preprocess(image_bytes)
+ # Step 1 — background removal (single or multi-view)
+ view_labels = params.get("view_labels", [])
+ is_multiview = isinstance(image_bytes, list) and len(image_bytes) > 1
+ if is_multiview:
+ self._report(progress_cb, 5, f"Removing backgrounds ({len(image_bytes)} images)…")
+ processed_images = [self._preprocess(ib) for ib in image_bytes]
+ if view_labels and len(view_labels) == len(processed_images):
+ image = {label: img for label, img in zip(view_labels, processed_images)}
+ else:
+ fallback_keys = ["front", "left", "back", "right"]
+ image = {fallback_keys[i]: img for i, img in enumerate(processed_images[:4])}
+ elif isinstance(image_bytes, list):
+ self._report(progress_cb, 5, "Removing background…")
+ image = self._preprocess(image_bytes[0])
+ else:
+ self._report(progress_cb, 5, "Removing background…")
+ image = self._preprocess(image_bytes)
# Step 2 — shape generation (long, no internal callbacks)
self._report(progress_cb, 12, "Generating 3D shape…")
diff --git a/api/services/generators/hunyuan3d_mini.py b/api/services/generators/hunyuan3d_mini.py
index eb1e766..1645f25 100644
--- a/api/services/generators/hunyuan3d_mini.py
+++ b/api/services/generators/hunyuan3d_mini.py
@@ -17,7 +17,7 @@
import uuid
import zipfile
from pathlib import Path
-from typing import Callable, Optional
+from typing import Callable, List, Optional, Union
from PIL import Image
@@ -83,7 +83,7 @@ def unload(self) -> None:
def generate(
self,
- image_bytes: bytes,
+ image_bytes: Union[bytes, List[bytes]],
params: dict,
progress_cb: Optional[Callable[[int, str], None]] = None,
) -> Path:
@@ -96,9 +96,23 @@ def generate(
guidance_scale = float(params.get("guidance_scale", 5.5))
seed = int(params.get("seed", -1))
- # Step 1 — background removal
- self._report(progress_cb, 5, "Removing background…")
- image = self._preprocess(image_bytes)
+ # Step 1 — background removal (single or multi-view)
+ view_labels = params.get("view_labels", [])
+ is_multiview = isinstance(image_bytes, list) and len(image_bytes) > 1
+ if is_multiview:
+ self._report(progress_cb, 5, f"Removing backgrounds ({len(image_bytes)} images)…")
+ processed_images = [self._preprocess(ib) for ib in image_bytes]
+ if view_labels and len(view_labels) == len(processed_images):
+ image = {label: img for label, img in zip(view_labels, processed_images)}
+ else:
+ fallback_keys = ["front", "left", "back", "right"]
+ image = {fallback_keys[i]: img for i, img in enumerate(processed_images[:4])}
+ elif isinstance(image_bytes, list):
+ self._report(progress_cb, 5, "Removing background…")
+ image = self._preprocess(image_bytes[0])
+ else:
+ self._report(progress_cb, 5, "Removing background…")
+ image = self._preprocess(image_bytes)
# Step 2 — shape generation
# If texture is enabled, reserve 5-70% for shape and 70-95% for texture
diff --git a/api/services/generators/sf3d.py b/api/services/generators/sf3d.py
index 105df06..4d6e63a 100644
--- a/api/services/generators/sf3d.py
+++ b/api/services/generators/sf3d.py
@@ -9,7 +9,7 @@
import uuid
import zipfile
from pathlib import Path
-from typing import Callable, Optional
+from typing import Callable, List, Optional, Union
from PIL import Image
@@ -69,12 +69,16 @@ def load(self) -> None:
def generate(
self,
- image_bytes: bytes,
+ image_bytes: Union[bytes, List[bytes]],
params: dict,
progress_cb: Optional[Callable[[int, str], None]] = None,
) -> Path:
import torch
+        # SF3D only supports single-image input; use the first image if a list is provided (assumes non-empty list — FastAPI's required File(...) should guarantee at least one upload; confirm at the caller)
+ if isinstance(image_bytes, list):
+ image_bytes = image_bytes[0]
+
vertex_count = int(params.get("vertex_count", 10000))
remesh = str(params.get("remesh", "quad"))
diff --git a/src/areas/generate/GeneratePage.tsx b/src/areas/generate/GeneratePage.tsx
index cf4cb5b..c30b38b 100644
--- a/src/areas/generate/GeneratePage.tsx
+++ b/src/areas/generate/GeneratePage.tsx
@@ -7,9 +7,10 @@ import WorkspacePanel from './components/WorkspacePanel'
import Viewer3D from './components/Viewer3D'
export default function GeneratePage(): JSX.Element {
- const selectedImagePath = useAppStore((s) => s.selectedImagePath)
+ const viewImages = useAppStore((s) => s.viewImages)
const { currentJob, startGeneration } = useGeneration()
const isGenerating = currentJob?.status === 'uploading' || currentJob?.status === 'generating'
+ const hasFrontImage = !!viewImages.front
return (
<>
@@ -23,8 +24,8 @@ export default function GeneratePage(): JSX.Element {
{/* Sticky bottom: Generate button */}