Spaces: Runtime error
import gradio as gr
import torch
from authtoken import auth_token  # assumed local module defining auth_token = "<your HF access token>"
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

model_id = "stabilityai/stable-diffusion-2-1"

# Default to CPU; switch to CUDA (and half precision) when a GPU is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32

pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=dtype,
    token=auth_token,  # optional here, since stable-diffusion-2-1 is a public model
)
# Use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler here instead
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

def generate(prompt):
    # Autocast only on CUDA; recent diffusers returns images via .images, not ["sample"]
    with torch.no_grad(), torch.autocast(device.type, enabled=device.type == "cuda"):
        image = pipe(prompt, guidance_scale=8.5).images[0]
    image.save("generatedimage.png")
    return image

def predict_text(prompt):
    return generate(prompt)

def predict_image(input_image, prompt):
    # input() cannot be used inside a web callback, so the prompt is taken as an argument.
    # Not wired into the interface below; see the sketch after this listing.
    input_image.save("input_image.png")
    return generate(prompt)

iface = gr.Interface(
    fn=predict_text,
    inputs="text",
    outputs="image",
    # capture_session has been removed from recent Gradio releases, so it is dropped here
)
iface.launch()
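The predict_image helper above is defined but never registered with Gradio. A minimal sketch of how it could be exposed alongside the text-prompt interface, assuming the two-argument signature used above (the component labels and tab names are illustrative, not part of the original Space); it would replace the final iface.launch() call:

image_iface = gr.Interface(
    fn=predict_image,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt")],
    outputs="image",
)
demo = gr.TabbedInterface(
    [iface, image_iface],
    tab_names=["Text to image", "Image + prompt"],
)
demo.launch()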