Fabrice-TIERCELIN committed on
Commit 8ab6e43 · verified · 1 Parent(s): 12f50c1

min_size parameter

Files changed (1)
  1. gradio_demo.py +5 -2
gradio_demo.py CHANGED

@@ -125,6 +125,7 @@ def stage2_process(
     a_prompt,
     n_prompt,
     num_samples,
+    min_size,
     downscale,
     upscale,
     edm_steps,
@@ -148,7 +149,6 @@ def stage2_process(
 ):
     start = time.time()
     print('Start stage2_process')
-    print(a_prompt)
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
         return None, None, None
@@ -175,7 +175,7 @@ def stage2_process(
     model.current_model = model_select
     input_image = HWC3(input_image)
     input_image = upscale_image(input_image, upscale, unit_resolution=32,
-                                min_size=32)
+                                min_size=min_size)
 
     LQ = np.array(input_image) / 255.0
     LQ = np.power(LQ, gamma_correction)
@@ -228,6 +228,7 @@ def stage2_process(
         ((str(hours) + " h, ") if hours != 0 else "") + \
         ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
         str(secondes) + " sec."
+    print(information)
 
     # Only one image can be shown in the slider
     return [noisy_image] + [results[0]], gr.update(format = output_format, value = [noisy_image] + results), gr.update(value = information, visible = True), event_id
@@ -359,6 +360,7 @@ with gr.Blocks(title="SUPIR") as interface:
     edm_steps = gr.Slider(label="Steps", info="lower=faster, higher=more details", minimum=1, maximum=200, value=default_setting.edm_steps if torch.cuda.device_count() > 0 else 1, step=1)
     num_samples = gr.Slider(label="Num Samples", info="Number of generated results", minimum=1, maximum=4 if not args.use_image_slider else 1
                             , value=1, step=1)
+    min_size = gr.Slider(label="Minimum size", info="Minimum height, minimum width", minimum=32, value=1024, step=32)
     downscale = gr.Radio([1, 2, 3, 4, 5, 6, 7, 8], label="Pre-downscale factor", info="Reducing blurred image reduce the process time", value=1, interactive=True)
     with gr.Row():
         with gr.Column():
@@ -449,6 +451,7 @@ with gr.Blocks(title="SUPIR") as interface:
     a_prompt,
     n_prompt,
     num_samples,
+    min_size,
     downscale,
     upscale,
     edm_steps,
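
Taken together, the hunks replace the hardcoded `min_size=32` with a user-controllable value: a new "Minimum size" slider in the Gradio UI is threaded through `stage2_process` and forwarded to `upscale_image`. The body of `upscale_image` is not part of this commit; the sketch below is a hypothetical reading of how such a helper might combine `upscale`, `min_size`, and `unit_resolution`, not the actual SUPIR implementation.

import numpy as np
from PIL import Image

def upscale_image(img, upscale, unit_resolution=32, min_size=32):
    """Hypothetical sketch (the real helper is not shown in this diff):
    scale by `upscale`, enforce that the shorter side is at least
    `min_size`, and round both sides to multiples of `unit_resolution`."""
    h, w = img.shape[:2]
    # Apply the requested upscale factor.
    h, w = h * upscale, w * upscale
    # Enforce the minimum size while preserving the aspect ratio.
    if min(h, w) < min_size:
        ratio = min_size / min(h, w)
        h, w = h * ratio, w * ratio
    # Round each side to the nearest multiple of unit_resolution.
    h = int(np.round(h / unit_resolution)) * unit_resolution
    w = int(np.round(w / unit_resolution)) * unit_resolution
    # PIL's resize expects (width, height).
    return np.array(Image.fromarray(img).resize((w, h), Image.LANCZOS))

Under this reading, the slider's default of 1024 with step 32 pushes every output side to at least 1024 px while keeping dimensions multiples of 32, consistent with the `unit_resolution=32` the call site already passes.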