min_size=32
gradio_demo.py  CHANGED  (+3 -3)
@@ -175,7 +175,7 @@ def stage2_process(
     model.current_model = model_select
     input_image = HWC3(input_image)
     input_image = upscale_image(input_image, upscale, unit_resolution=32,
-                                min_size=
+                                min_size=32)

     LQ = np.array(input_image) / 255.0
     LQ = np.power(LQ, gamma_correction)
@@ -327,7 +327,7 @@ with gr.Blocks(title="SUPIR") as interface:
         with gr.Group():
             prompt = gr.Textbox(label="Image description for LlaVa", value="", placeholder="A person, walking, in a town, Summer, photorealistic", lines=3, visible=False)
             upscale = gr.Radio([1, 2, 3, 4, 5, 6, 7, 8], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
-            a_prompt = gr.Textbox(label="Image description
+            a_prompt = gr.Textbox(label="Image description",
                                   info="Help the AI understand what the image represents; describe as much as possible",
                                   value='Cinematic, High Contrast, highly detailed, taken using a Canon EOS R '
                                         'camera, hyper detailed photo - realistic maximum detail, 32k, Color '
@@ -338,7 +338,7 @@ with gr.Blocks(title="SUPIR") as interface:
             output_format = gr.Radio(["png", "webp", "jpeg", "gif", "bmp"], label="Image format for result", info="File extention", value="png", interactive=True)

             with gr.Accordion("Pre-denoising (optional)", open=False):
-                gamma_correction = gr.Slider(label="Gamma Correction", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
+                gamma_correction = gr.Slider(label="Gamma Correction", info = "lower=lighter, higher=darker", minimum=0.1, maximum=2.0, value=1.0, step=0.1)
                 denoise_button = gr.Button(value="Pre-denoise")
                 denoise_image = gr.Image(label="Denoised image", show_label=True, type="numpy", height=600, elem_id="image-s1")
                 denoise_information = gr.HTML(value="If present, the denoised image will be used for the restoration instead of the input image.", visible=False)
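
Note on the headline change: the upscale_image call now passes min_size=32, matching unit_resolution=32. upscale_image itself is defined elsewhere in the repository, so the following is only a hedged sketch of how a minimum size and a resolution unit typically interact (scale by the upscale factor, clamp to min_size, round to a multiple of unit_resolution). The helper name sketch_target_size and its rounding rule are assumptions for illustration, not the project's actual implementation.

import math

def sketch_target_size(width: int, height: int, upscale: int,
                       unit_resolution: int = 32, min_size: int = 32):
    """Hypothetical helper: scale, clamp to min_size, snap to a multiple of unit_resolution."""
    def fit(x: int) -> int:
        x = max(x * upscale, min_size)                            # never smaller than min_size
        return math.ceil(x / unit_resolution) * unit_resolution   # round up to the 32-px grid
    return fit(width), fit(height)

print(sketch_target_size(20, 20, upscale=1))  # (32, 32): a tiny crop only has to reach 32x32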
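
The new slider tooltip ("lower=lighter, higher=darker") describes the gamma step already visible in stage2_process: the input is normalized to [0, 1] and raised to the power gamma_correction, so exponents below 1.0 brighten the image and exponents above 1.0 darken it. A minimal standalone illustration (not part of the commit):

import numpy as np

pixel = 0.25  # a mid-dark pixel after LQ = np.array(input_image) / 255.0

for gamma in (0.5, 1.0, 2.0):
    print(f"gamma={gamma}: {pixel} -> {np.power(pixel, gamma):.3f}")

# gamma=0.5: 0.25 -> 0.500   lower gamma => lighter
# gamma=1.0: 0.25 -> 0.250   unchanged
# gamma=2.0: 0.25 -> 0.062   higher gamma => darker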