import os
import sys
import gradio as gr
import numpy as np
from PIL import Image
import io
import tempfile
from pathlib import Path
# Add the notebook directory to the path so the inference code can be imported
NOTEBOOK_PATH = "notebook"
if os.path.exists(NOTEBOOK_PATH):
    sys.path.append(NOTEBOOK_PATH)

# Import the inference code, with a demo-mode fallback if it is unavailable
try:
    from inference import Inference, load_image, load_single_mask
    INFERENCE_AVAILABLE = True
except ImportError as e:
    print(f"Warning: Could not import inference module: {e}")
    print("Running in demo mode with mock functionality")
    INFERENCE_AVAILABLE = False
def create_demo_3d_output():
    """Create a small random PLY point cloud for demonstration purposes."""
    # A valid ASCII PLY file must begin with the "ply" magic line, so the
    # header carries no leading comment.
    header = """ply
format ascii 1.0
element vertex 1000
property float x
property float y
property float z
property float nx
property float ny
property float nz
property uchar red
property uchar green
property uchar blue
end_header
"""
    lines = [header]
    # Append 1000 random demo vertices: position, normal, and RGB colour
    for _ in range(1000):
        x, y, z = np.random.normal(0, 1, 3)
        nx, ny, nz = np.random.normal(0, 1, 3)
        r, g, b = np.random.randint(0, 256, 3)
        lines.append(f"{x:.3f} {y:.3f} {z:.3f} {nx:.3f} {ny:.3f} {nz:.3f} {r} {g} {b}\n")
    # Build the file as text, then encode to bytes for downstream file writing
    return "".join(lines).encode("ascii")
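# Quick, optional sanity check (illustrative only, not used by the app): the demo
# output should decode as ASCII PLY with exactly 1000 vertex lines after the header.
#
#   body = create_demo_3d_output().decode("ascii")
#   vertices = body.split("end_header\n", 1)[1].strip().splitlines()
#   assert len(vertices) == 1000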
def load_and_validate_image(image_path):
    """Load and validate an image file, returning an RGB numpy array."""
    try:
        img = Image.open(image_path)
        img = img.convert("RGB")
        return np.array(img)
    except Exception as e:
        raise ValueError(f"Error loading image: {str(e)}")
def process_image_to_3d(image, mask=None, seed=42, model_tag="hf"):
    """Run the image-to-3D pipeline and return the generated Gaussian splat."""
    try:
        if not INFERENCE_AVAILABLE:
            # Demo mode - return mock output
            demo_content = create_demo_3d_output()
            return {
                "status": "demo",
                "message": "Demo mode - inference module not available",
                "file_content": demo_content,
                "filename": "demo_splat.ply",
            }

        # Pipeline configuration for the selected model
        config_path = f"checkpoints/{model_tag}/pipeline.yaml"

        # Write the uploaded image (and optional mask) to temporary files
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as img_temp:
            img = Image.fromarray(image)
            img.save(img_temp.name)
            temp_image_path = img_temp.name

        temp_mask_path = None
        if mask is not None:
            with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as mask_temp:
                mask_img = Image.fromarray(mask)
                mask_img.save(mask_temp.name)
                temp_mask_path = mask_temp.name

        # Load the model
        inference = Inference(config_path, compile=False)

        # Load image and mask
        loaded_image = load_image(temp_image_path)
        loaded_mask = load_single_mask(temp_mask_path) if temp_mask_path else None

        # Run inference
        output = inference(loaded_image, loaded_mask, seed=seed)

        # Export the Gaussian splat to a PLY file
        output_path = f"output_splat_{seed}.ply"
        output["gs"].save_ply(output_path)

        # Read the generated file back into memory
        with open(output_path, "rb") as f:
            file_content = f.read()

        # Clean up temporary files
        try:
            os.unlink(temp_image_path)
            if temp_mask_path:
                os.unlink(temp_mask_path)
            os.unlink(output_path)
        except OSError:
            pass

        return {
            "status": "success",
            "message": "3D model generated successfully!",
            "file_content": file_content,
            "filename": f"splat_{seed}.ply",
        }
    except Exception as e:
        return {
            "status": "error",
            "message": f"Error processing image: {str(e)}",
            "file_content": None,
            "filename": None,
        }
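# Illustrative sketch (not wired into the app): calling process_image_to_3d directly
# from a script, bypassing the Gradio UI. The helper name and file paths below are
# placeholders, not part of the original Space.
def _example_direct_inference(image_path="input.png", mask_path=None, seed=123):
    img = np.array(Image.open(image_path).convert("RGB"))
    msk = np.array(Image.open(mask_path).convert("L")) if mask_path else None
    result = process_image_to_3d(img, msk, seed=seed, model_tag="hf")
    if result["status"] in ("success", "demo") and result["file_content"]:
        # Persist the returned bytes next to the script
        with open(result["filename"], "wb") as f:
            f.write(result["file_content"])
    return result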
def update_mask_status(mask_image):
    """Report whether an optional mask has been provided."""
    if mask_image is not None:
        return "✅ Mask uploaded"
    return "No mask uploaded"
def process_wrapper(image, mask, seed, model_tag):
    """Generator wrapper for the Gradio interface: streams status, then the file."""
    if image is None:
        yield "Please upload an image first", gr.update(visible=False)
        return

    # Show an intermediate status while inference runs
    yield "Processing image to 3D model...", gr.update(visible=False)

    result = process_image_to_3d(image, mask, int(seed), model_tag)

    if result["status"] in ("success", "demo"):
        # gr.File serves a path, so write the generated bytes to a named file
        out_path = os.path.join(tempfile.mkdtemp(), result["filename"])
        with open(out_path, "wb") as f:
            f.write(result["file_content"])
        prefix = "Demo: " if result["status"] == "demo" else ""
        yield prefix + result["message"], gr.update(value=out_path, visible=True)
    else:
        yield "Error: " + result["message"], gr.update(visible=False)
def create_interface():
    """Create the Gradio interface."""
    # Custom CSS for better styling
    css = """
    .gradio-container {
        max-width: 1200px !important;
        margin: auto !important;
    }
    .upload-section {
        border: 2px dashed #ccc;
        padding: 20px;
        border-radius: 10px;
        background-color: #f9f9f9;
    }
    .status-message {
        padding: 10px;
        border-radius: 5px;
        margin: 10px 0;
    }
    .success {
        background-color: #d4edda;
        color: #155724;
        border: 1px solid #c3e6cb;
    }
    .error {
        background-color: #f8d7da;
        color: #721c24;
        border: 1px solid #f5c6cb;
    }
    """
    with gr.Blocks(css=css, title="Image to 3D Converter") as demo:
        # Header
        gr.HTML("""
        <div style="text-align: center; padding: 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 10px; margin-bottom: 30px;">
            <h1 style="margin: 0; font-size: 2.5em;">🎨 Image to 3D Converter</h1>
            <p style="margin: 10px 0 0 0; font-size: 1.2em;">Transform your 2D images into stunning 3D models</p>
            <div style="margin-top: 15px; font-size: 0.9em;">
                <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #fff; text-decoration: none; border: 1px solid rgba(255,255,255,0.5); padding: 5px 15px; border-radius: 20px;">Built with anycoder</a>
            </div>
        </div>
        """)

        with gr.Row():
            with gr.Column(scale=1):
                gr.HTML("""
                <div class="upload-section">
                    <h3>📤 Upload Image</h3>
                    <p>Upload the image you want to convert to 3D</p>
                </div>
                """)
                image_input = gr.Image(
                    label="Input Image",
                    type="numpy",
                    image_mode="RGB",
                    elem_classes=["upload-area"]
                )

                with gr.Row():
                    mask_upload = gr.Image(
                        label="Optional Mask",
                        type="numpy",
                        image_mode="L",
                        elem_classes=["upload-area"]
                    )
                    mask_status = gr.Textbox(
                        label="Mask Status",
                        value="No mask uploaded",
                        interactive=False,
                        elem_classes=["mask-status"]
                    )
            with gr.Column(scale=1):
                gr.HTML("""
                <div style="background: #f0f8ff; padding: 20px; border-radius: 10px; margin-bottom: 20px;">
                    <h3>⚙️ Configuration</h3>
                </div>
                """)

                with gr.Row():
                    seed = gr.Slider(
                        minimum=0,
                        maximum=999999,
                        value=42,
                        step=1,
                        label="Random Seed",
                        info="Controls the randomness in generation"
                    )
                    model_tag = gr.Dropdown(
                        choices=["hf"],
                        value="hf",
                        label="Model Tag",
                        info="Select the model configuration"
                    )

                run_button = gr.Button(
                    "🚀 Generate 3D Model",
                    variant="primary",
                    size="lg"
                )
        with gr.Row():
            with gr.Column():
                status_output = gr.Textbox(
                    label="Status",
                    max_lines=5,
                    interactive=False,
                    elem_classes=["status-message"]
                )

        with gr.Row():
            with gr.Column():
                output_file = gr.File(
                    label="Download 3D Model",
                    file_types=[".ply"],
                    visible=False,
                    elem_classes=["download-section"]
                )
        # Wire up the interface
        mask_upload.change(
            fn=update_mask_status,
            inputs=[mask_upload],
            outputs=[mask_status]
        )
        run_button.click(
            fn=process_wrapper,
            inputs=[image_input, mask_upload, seed, model_tag],
            outputs=[status_output, output_file]
        )
        # Examples section
        gr.HTML("""
        <div style="margin-top: 40px; text-align: center;">
            <h3>📋 How to Use</h3>
            <div style="display: flex; justify-content: space-around; margin-top: 20px; flex-wrap: wrap;">
                <div style="max-width: 300px; padding: 20px; background: #f8f9fa; border-radius: 10px; margin: 10px;">
                    <h4>1. Upload Image</h4>
                    <p>Choose a clear, well-lit image for best results</p>
                </div>
                <div style="max-width: 300px; padding: 20px; background: #f8f9fa; border-radius: 10px; margin: 10px;">
                    <h4>2. Add Mask (Optional)</h4>
                    <p>Upload a mask to focus on specific areas</p>
                </div>
                <div style="max-width: 300px; padding: 20px; background: #f8f9fa; border-radius: 10px; margin: 10px;">
                    <h4>3. Generate</h4>
                    <p>Click generate and wait for your 3D model</p>
                </div>
            </div>
        </div>
        """)

    return demo
if __name__ == "__main__":
    # Create and launch the interface
    demo = create_interface()

    # Print available model paths for debugging
    print("Checking for model checkpoints...")
    if os.path.exists("checkpoints"):
        for root, dirs, files in os.walk("checkpoints"):
            print(f"Found in {root}: {files}")
    else:
        print("No checkpoints directory found")

    print("Available inference modules:", "✓" if INFERENCE_AVAILABLE else "✗")

    # Launch with proper configuration
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True,
        debug=True,
        inbrowser=True
    )
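# Local usage (assumed layout, mirroring the paths referenced above): place the
# inference code under ./notebook and a pipeline config at
# checkpoints/<model_tag>/pipeline.yaml, then run this file with Python and open
# http://localhost:7860 in a browser. Without the checkpoints or the inference
# module the app still starts, but only in the mock demo mode.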