Rollback
gradio_demo.py  +4 -43
@@ -117,6 +117,7 @@ def llave_process(input_image, temperature, top_p, qs=None):
     print('<<== llave_process')
     return captions[0]
 
+@spaces.GPU(duration=540)
 def stage2_process(
     noisy_image,
     denoise_image,
@@ -160,6 +161,7 @@ def stage2_process(
     if 1 < downscale:
         input_height, input_width, input_channel = np.array(input_image).shape
         input_image = input_image.resize((input_width // downscale, input_height // downscale), Image.LANCZOS)
+    torch.cuda.set_device(SUPIR_device)
     event_id = str(time.time_ns())
     event_dict = {'event_id': event_id, 'localtime': time.ctime(), 'prompt': prompt, 'a_prompt': a_prompt,
                   'n_prompt': n_prompt, 'num_samples': num_samples, 'upscale': upscale, 'edm_steps': edm_steps,
@@ -180,47 +182,6 @@ def stage2_process(
     input_image = upscale_image(input_image, upscale, unit_resolution=32,
                                 min_size=min_size)
 
-    result_slider, result_gallery, restore_information, event_id = restore(
-        model,
-        edm_steps,
-        s_stage1,
-        s_churn,
-        s_noise,
-        s_cfg,
-        s_stage2,
-        seed,
-        num_samples,
-        a_prompt,
-        n_prompt,
-        color_fix_type,
-        linear_CFG,
-        linear_s_stage2,
-        spt_linear_CFG,
-        spt_linear_s_stage2
-    )
-
-    return result_slider, result_gallery, restore_information, event_id
-
-@spaces.GPU(duration=540)
-def restore(
-    model,
-    edm_steps,
-    s_stage1,
-    s_churn,
-    s_noise,
-    s_cfg,
-    s_stage2,
-    seed,
-    num_samples,
-    a_prompt,
-    n_prompt,
-    color_fix_type,
-    linear_CFG,
-    linear_s_stage2,
-    spt_linear_CFG,
-    spt_linear_s_stage2
-):
-    torch.cuda.set_device(SUPIR_device)
     LQ = np.array(input_image) / 255.0
     LQ = np.power(LQ, gamma_correction)
     LQ *= 255.0
@@ -328,7 +289,7 @@ def submit_feedback(event_id, fb_score, fb_text):
 
 title_html = """
 <h1><center>SUPIR</center></h1>
-<center
+<big><center>Upscale your images up to x8 freely, without account, without watermark and download it</center></big>
 <center><big><big>🤸<big><big><big><big><big><big>🤸</big></big></big></big></big></big></big></big></center>
 
 <p>This is an online demo of SUPIR, a practicing model scaling for photo-realistic image restoration.
@@ -366,6 +327,7 @@ with gr.Blocks(title="SUPIR") as interface:
     input_image = gr.Image(label="Input", show_label=True, type="numpy", height=600, elem_id="image-input")
     with gr.Group():
         prompt = gr.Textbox(label="Image description for LlaVa", value="", placeholder="A person, walking, in a town, Summer, photorealistic", lines=3, visible=False)
+        upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8]], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
         a_prompt = gr.Textbox(label="Image description",
                               info="Help the AI understand what the image represents; describe as much as possible",
                               value='Cinematic, High Contrast, highly detailed, taken using a Canon EOS R '
@@ -374,7 +336,6 @@ with gr.Blocks(title="SUPIR") as interface:
                                     'hyper sharpness, perfect without deformations.',
                               lines=3)
         a_prompt_hint = gr.HTML("You can use a <a href='"'https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'"'>LlaVa space</a> to auto-generate the description of your image.")
-        upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8]], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
         output_format = gr.Radio([["*.png", "png"], ["*.webp", "webp"], ["*.jpeg", "jpeg"], ["*.gif", "gif"], ["*.bmp", "bmp"]], label="Image format for result", info="File extention", value="png", interactive=True)
 
     with gr.Accordion("Pre-denoising (optional)", open=False):
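
Net effect of the rollback: the ZeroGPU decorator sits on stage2_process itself again rather than on a separate restore() helper, and the CUDA device is selected inside that same function. A minimal sketch of this pattern, assuming a Hugging Face ZeroGPU Space; aside from spaces.GPU, torch.cuda.set_device and the Gradio calls, every name below is illustrative, not this repo's code:

# Sketch of the ZeroGPU pattern the rollback restores: the decorated
# function is the one Gradio invokes, so the GPU is attached for the
# whole call and all CUDA work happens inside that window.
import gradio as gr
import spaces
import torch

DEVICE = torch.device("cuda")  # assumption: single CUDA device, like SUPIR_device in the demo

@spaces.GPU(duration=540)  # hold a ZeroGPU slot for up to 540 s per call
def do_restore(image):
    torch.cuda.set_device(DEVICE)  # select the device inside the allocation window
    return image  # placeholder for the actual restoration work

with gr.Blocks() as demo:
    inp = gr.Image(type="numpy")
    out = gr.Image()
    gr.Button("Run").click(do_restore, inputs=inp, outputs=out)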
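For context on the upscale_image(input_image, upscale, unit_resolution=32, min_size=min_size) call kept in stage2_process: diffusion backbones typically require spatial dimensions that divide evenly by a latent unit. The sketch below is a hypothetical re-implementation of such an alignment step, not the repo's actual upscale_image; it only illustrates what unit_resolution and min_size plausibly constrain:

# Hypothetical unit-aligned upscale (illustrative; not this repo's helper).
from PIL import Image

def upscale_aligned(img: Image.Image, factor: int,
                    unit_resolution: int = 32, min_size: int = 1024) -> Image.Image:
    w, h = img.size
    # Scale, then snap each side to a multiple of unit_resolution so
    # downstream convolutions and latents divide evenly; enforce a floor.
    new_w = max(round(w * factor / unit_resolution) * unit_resolution, min_size)
    new_h = max(round(h * factor / unit_resolution) * unit_resolution, min_size)
    return img.resize((new_w, new_h), Image.LANCZOS)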