Spaces: Runtime error
Commit: Upload folder using huggingface_hub
Files changed: app.py (+149 lines), requirements.txt (+15 lines)
app.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
import gradio as gr
from diffusers import DiffusionPipeline

# Load the pipeline once at module import so every request reuses the same
# weights instead of reloading the multi-GB model per call.
# NOTE(review): the original log messages contained mojibake emoji; replaced
# with plain ASCII text.
print("Loading Z-Image-Turbo pipeline...")

# Fall back to CPU when CUDA is unavailable so the app can start (slowly)
# on CPU-only hosts; the original hard-coded "cuda" and crashed at startup
# without a GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = DiffusionPipeline.from_pretrained(
    "Tongyi-MAI/Z-Image-Turbo",
    torch_dtype=torch.bfloat16,  # halves memory vs float32
    low_cpu_mem_usage=False,
)
pipe.to(device)

print("Pipeline loaded!")
|
| 15 |
+
|
| 16 |
+
def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed):
    """Generate one image from *prompt* using the module-level pipeline.

    Args:
        prompt: Text description of the desired image.
        height: Output height in pixels (coerced to int).
        width: Output width in pixels (coerced to int).
        num_inference_steps: Number of diffusion steps (coerced to int).
        seed: RNG seed; ignored when ``randomize_seed`` is True.
        randomize_seed: When True, draw a fresh random seed instead of ``seed``.

    Returns:
        Tuple of (PIL image, seed actually used) — the seed is returned so the
        UI can display and reuse it after randomization.
    """
    if randomize_seed:
        seed = torch.randint(0, 2**32 - 1, (1,)).item()

    # Seed on the pipeline's own device rather than hard-coding "cuda",
    # so generation also works when the pipeline runs on CPU.
    generator = torch.Generator(device=pipe.device).manual_seed(int(seed))

    result = pipe(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=0.0,  # Turbo models are distilled for CFG-free sampling
        generator=generator,
    )
    return result.images[0], seed
|
| 32 |
+
|
| 33 |
+
# Example prompts surfaced in the UI's gr.Examples widget; each entry is a
# one-element list because gr.Examples maps every row onto the single
# prompt textbox input.
examples = [
    ["Young Chinese woman in red Hanfu, intricate embroidery. Impeccable makeup, red floral forehead pattern. Elaborate high bun, golden phoenix headdress, red flowers, beads. Holds round folding fan with lady, trees, bird. Neon lightning-bolt lamp, bright yellow glow, above extended left palm. Soft-lit outdoor night background, silhouetted tiered pagoda, blurred colorful distant lights."],
    ["A majestic dragon soaring through clouds at sunset, scales shimmering with iridescent colors, detailed fantasy art style"],
    ["Cozy coffee shop interior, warm lighting, rain on windows, plants on shelves, vintage aesthetic, photorealistic"],
    ["Astronaut riding a horse on Mars, cinematic lighting, sci-fi concept art, highly detailed"],
    ["Portrait of a wise old wizard with a long white beard, holding a glowing crystal staff, magical forest background"],
]
|
| 41 |
+
|
| 42 |
+
# Gradio Blocks UI: prompt + advanced settings on the left, output image on
# the right, example prompts below.
# NOTE(review): the original file's labels and headings contained mojibake
# (emoji bytes decoded with the wrong charset); replaced with plain ASCII.
with gr.Blocks(
    title="Z-Image-Turbo",
    fill_height=True,
    # NOTE(review): footer_links is a recent Blocks kwarg — confirm the pinned
    # Gradio version supports it, otherwise Blocks() raises TypeError at startup.
    footer_links=[{"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}],
) as demo:

    # Header
    with gr.Row():
        gr.Markdown("""
        # Z-Image-Turbo

        Generate high-quality images in just 8 inference steps using the [Tongyi-MAI/Z-Image-Turbo](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo) model.
        """)

    # Main content area
    with gr.Row():
        with gr.Column(scale=1, min_width=320):
            # Prompt input — the only required field
            prompt = gr.Textbox(
                label="Image Description",
                placeholder="Describe the image you want to generate...",
                lines=3,
                max_lines=6,
                autofocus=True,
            )

            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    height = gr.Slider(
                        label="Height",
                        minimum=512,
                        maximum=2048,
                        value=1024,
                        # Constrain to multiples of 64 — diffusion pipelines
                        # reject dimensions not divisible by the VAE factor.
                        step=64,
                    )
                with gr.Row():
                    width = gr.Slider(
                        label="Width",
                        minimum=512,
                        maximum=2048,
                        value=1024,
                        step=64,
                    )

                with gr.Row():
                    num_inference_steps = gr.Slider(
                        label="Inference Steps",
                        minimum=1,
                        maximum=20,
                        # Default 8 to match the 8-step Turbo model advertised
                        # in the header (the original defaulted to 9).
                        value=8,
                        step=1,
                    )

                with gr.Row():
                    seed = gr.Number(
                        label="Seed",
                        value=42,
                        precision=0,
                    )
                    randomize_seed = gr.Checkbox(
                        label="Randomize Seed",
                        value=False,
                    )

            generate_btn = gr.Button(
                "Generate Image",
                variant="primary",
                size="lg",
                scale=1,
            )

        with gr.Column(scale=1, min_width=320):
            output_image = gr.Image(
                label="Generated Image",
                type="pil",
            )

    # Examples section
    with gr.Row():
        gr.Markdown("### Try These Examples")
    gr.Examples(
        examples=examples,
        inputs=prompt,
        cache_examples=False,
    )

    # Button click and Enter-in-textbox trigger the same generation call;
    # wire both through one loop so the two registrations cannot drift apart.
    # NOTE(review): api_visibility is only accepted by recent Gradio releases —
    # confirm against the pinned version.
    for _trigger in (generate_btn.click, prompt.submit):
        _trigger(
            fn=generate_image,
            inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
            outputs=[output_image, seed],
            api_visibility="public",
        )
|
| 143 |
+
|
| 144 |
+
# Script entry point: start the Gradio server when run directly
# (not when imported, e.g. by the Spaces runtime).
if __name__ == "__main__":
    demo.launch(
        mcp_server=True,   # also expose the app's functions as MCP tools
        show_error=True,   # surface Python tracebacks in the UI for debugging
        share=False,       # no public gradio.live tunnel; rely on the host
    )
|
requirements.txt
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
torch
|
| 2 |
+
torchvision
|
| 3 |
+
torchaudio
|
| 4 |
+
gradio
|
| 5 |
+
requests
|
| 6 |
+
Pillow
|
| 7 |
+
git+https://github.com/huggingface/diffusers
|
| 8 |
+
git+https://github.com/huggingface/transformers
|
| 9 |
+
sentencepiece
|
| 10 |
+
accelerate
|
| 11 |
+
tokenizers
|
| 12 |
+
datasets
|
| 13 |
+
numpy
|
| 14 |
+
scipy
|
| 15 |
+
matplotlib
|