Spaces:
Paused
Paused
Update app_quant_latent1.py
Browse files- app_quant_latent1.py +40 -43
app_quant_latent1.py
CHANGED
|
def safe_generate_with_latents(
    transformer,
    vae,
    text_encoder,
    tokenizer,
    scheduler,
    pipe,  # NOTE(review): accepted for caller compatibility but unused here — confirm before removing
    prompt,
    height,
    width,
    steps,
    guidance_scale,
    negative_prompt,
    num_images_per_prompt,
    generator,
    cfg_normalization,
    cfg_truncation,
    max_sequence_length,
):
    """Run ``generate(...)`` in latent-output mode without ever raising.

    Thin error-isolating wrapper around the module-level ``generate`` helper:
    it forwards all model components and sampling settings, forces
    ``output_type="latent"``, and converts any exception into a return value
    so the caller can decide how to surface the failure.

    Parameters mirror ``generate`` one-to-one, except:
        pipe:  currently unused (kept so existing call sites keep working).
        steps: forwarded as ``num_inference_steps``.

    Returns:
        tuple: ``(latents, None)`` on success, where ``latents`` is whatever
        ``generate`` produces with ``output_type="latent"``; ``(None, exc)``
        if ``generate`` raised, with ``exc`` being the caught exception.
    """
    try:
        # --- Your ORIGINAL generate() code pasted here EXACTLY ---
        latents_or_images = generate(
            transformer=transformer,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            generator=generator,
            cfg_normalization=cfg_normalization,
            cfg_truncation=cfg_truncation,
            max_sequence_length=max_sequence_length,
            output_type="latent",  # IMPORTANT: keep latents; decoding happens elsewhere
        )
        return latents_or_images, None
    # Broad catch is deliberate: this wrapper's contract is "never raise" —
    # the error travels back to the caller as the second tuple element.
    except Exception as e:
        return None, e