Update app.py
app.py
CHANGED
@@ -4,23 +4,35 @@ from PIL import Image
 import torch
 
 # Load model and processor
-
-
+try:
+    model = AutoModel.from_pretrained("zxhezexin/openlrm-mix-large-1.1")
+    processor = AutoProcessor.from_pretrained("zxhezexin/openlrm-mix-large-1.1")
+except Exception as e:
+    print(f"Error loading model or processor: {e}")
 
 # Define function to generate 3D output from 2D image
 def image_to_3d(image):
-
-
-
-
-
+    try:
+        # Preprocess the input image
+        inputs = processor(images=image, return_tensors="pt")
+
+        # Run inference
+        with torch.no_grad():
+            outputs = model(**inputs)
+
+        # Placeholder return, replace this with actual 3D visualization
+        return "3D model generated!"
+    except Exception as e:
+        return f"Error during inference: {str(e)}"
 
 # Gradio interface
 interface = gr.Interface(
     fn=image_to_3d,
     inputs=gr.Image(type="pil"),
-    outputs="text", #
-    title="OpenLRM Mix-Large 1.1 - Image to 3D"
+    outputs="text",  # Placeholder output (you can modify this for 3D)
+    title="OpenLRM Mix-Large 1.1 - Image to 3D",
+    description="Upload an image to generate a 3D model using OpenLRM Mix-Large 1.1."
 )
 
+# Launch the Gradio interface
 interface.launch()
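The committed version still returns a text placeholder from image_to_3d. A minimal sketch of how that placeholder could later be swapped for Gradio's built-in 3D viewer follows, assuming the model's outputs can be written to a mesh file: the save_mesh helper and the .obj export are hypothetical and not part of this commit, while gr.Model3D and gr.Error are standard Gradio components.

# Sketch only, not part of this commit. model and processor are the globals
# loaded in app.py above; save_mesh is a hypothetical helper, since the commit
# does not show how OpenLRM outputs map to a mesh.
import tempfile

import gradio as gr
import torch


def image_to_3d(image):
    try:
        # Preprocess the input image, as in the committed function
        inputs = processor(images=image, return_tensors="pt")

        # Run inference without tracking gradients
        with torch.no_grad():
            outputs = model(**inputs)

        # Hypothetical step: write the generated geometry to a mesh file that
        # the viewer can render (.obj/.glb/.gltf)
        mesh_path = tempfile.NamedTemporaryFile(suffix=".obj", delete=False).name
        save_mesh(outputs, mesh_path)  # hypothetical helper
        return mesh_path
    except Exception as e:
        # Surface the failure in the Gradio UI instead of returning text
        raise gr.Error(f"Error during inference: {e}")


interface = gr.Interface(
    fn=image_to_3d,
    inputs=gr.Image(type="pil"),
    outputs=gr.Model3D(),  # renders the returned mesh file path in the browser
    title="OpenLRM Mix-Large 1.1 - Image to 3D",
)

gr.Model3D expects the function to return a path to a mesh file, so only the export step needs to be filled in once the OpenLRM pipeline's output format is known.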