Spaces: Running on Zero
import json
import logging
import os
import random
import re
import sys
import warnings
from dataclasses import dataclass

import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline
| print("Loading Z-Image-Turbo pipeline...") | |
| pipe = DiffusionPipeline.from_pretrained( | |
| "Tongyi-MAI/Z-Image-Turbo",#"T5B/Z-Image-Turbo-FP8", | |
| torch_dtype=torch.bfloat16, | |
| low_cpu_mem_usage=False, | |
| attn_implementation="kernels-community/vllm-flash-attn3", | |
| ) | |
| #pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"] | |
| #spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3") | |
| pipe.to("cuda") | |
@spaces.GPU  # required on ZeroGPU Spaces: allocates a GPU for the call duration
def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed, num_images):
    """Generate one or more images for *prompt* with the Z-Image-Turbo pipeline.

    Args:
        prompt: Text description of the desired image.
        height, width: Output resolution in pixels (cast to int).
        num_inference_steps: Sampler steps (cast to int).
        seed: RNG seed; ignored when ``randomize_seed`` is True.
        randomize_seed: When True, draw a fresh 32-bit seed instead of *seed*.
        num_images: Requested batch size; clamped to the range [1, 3].

    Returns:
        Tuple of (list of PIL images, seed actually used) — matches the
        ``[output_images, used_seed]`` outputs wired in the UI.
    """
    if randomize_seed:
        # torch.randint's upper bound is exclusive; result fits in 32 bits.
        seed = torch.randint(0, 2**32 - 1, (1,)).item()
    # Clamp to the UI maximum of 3 so oversized API calls can't exhaust VRAM.
    num_images = min(max(1, int(num_images)), 3)
    generator = torch.Generator("cuda").manual_seed(int(seed))
    result = pipe(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=0.0,  # distilled turbo model runs without CFG
        generator=generator,
        max_sequence_length=1024,
        num_images_per_prompt=num_images,
    )
    return result.images, seed
# Example prompts shown below the UI; each entry is a one-element row
# matching gr.Examples(inputs=[prompt]).
examples = [
    ["Young Chinese woman in red Hanfu, intricate embroidery. Impeccable makeup, red floral forehead pattern. Elaborate high bun, golden phoenix headdress, red flowers, beads. Holds round folding fan with lady, trees, bird. Neon lightning-bolt lamp, bright yellow glow, above extended left palm. Soft-lit outdoor night background, silhouetted tiered pagoda, blurred colorful distant lights."],
    ["A majestic dragon soaring through clouds at sunset, scales shimmering with iridescent colors, detailed fantasy art style"],
    ["Cozy coffee shop interior, warm lighting, rain on windows, plants on shelves, vintage aesthetic, photorealistic"],
    ["Astronaut riding a horse on Mars, cinematic lighting, sci-fi concept art, highly detailed"],
    ["Portrait of a wise old wizard with a long white beard, holding a glowing crystal staff, magical forest background"],
]
# Build the Gradio interface: input controls on the left, gallery on the right.
with gr.Blocks(title="Z-Image-Turbo Demo") as demo:
    gr.Markdown(
        """
# 🎨 Z-Image-Turbo Multi Image Demo
Generate high-quality images using the [Tongyi-MAI/Z-Image-Turbo](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo) model.
This turbo model generates images in just 8 inference steps!
"""
    )
    with gr.Row():
        with gr.Column(scale=1):
            # Free-text description of the image to generate.
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Enter your image description...",
                lines=4,
            )
            with gr.Row():
                # Output resolution; 64-pixel steps keep sizes model-friendly.
                height = gr.Slider(
                    minimum=512,
                    maximum=2048,
                    value=1024,
                    step=64,
                    label="Height",
                )
                width = gr.Slider(
                    minimum=512,
                    maximum=2048,
                    value=1024,
                    step=64,
                    label="Width",
                )
            with gr.Row():
                # Batch size; generate_image clamps this to [1, 3] as well.
                num_images = gr.Slider(
                    minimum=1,
                    maximum=3,
                    value=2,
                    step=1,
                    label="Number of Images",
                )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    minimum=1,
                    maximum=20,
                    value=9,
                    step=1,
                    label="Inference Steps",
                    info="9 steps results in 8 DiT forwards",
                )
            with gr.Row():
                # Seed controls; the checkbox makes generate_image draw a random seed.
                seed = gr.Number(
                    label="Seed",
                    value=42,
                    precision=0,
                )
                randomize_seed = gr.Checkbox(
                    label="Randomize Seed",
                    value=False,
                )
            generate_btn = gr.Button("🚀 Generate", variant="primary", size="lg")
        with gr.Column(scale=1):
            output_images = gr.Gallery(
                label="Generated Image",
                type="pil",
            )
            # Echoes the seed actually used (useful when randomized).
            used_seed = gr.Number(
                label="Seed Used",
                interactive=False,
            )
    gr.Markdown("### 💡 Example Prompts")
    gr.Examples(
        examples=examples,
        inputs=[prompt],
        cache_examples=False,
    )
    # Connect the generate button
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed, num_images],
        outputs=[output_images, used_seed],
    )
    # Pressing Enter in the prompt box triggers the same generation path.
    prompt.submit(
        fn=generate_image,
        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed, num_images],
        outputs=[output_images, used_seed],
    )
if __name__ == "__main__":
    demo.launch()