Rotate image

gradio_demo.py  (+40 -6)  CHANGED
@@ -67,6 +67,24 @@ if torch.cuda.device_count() > 0:
 else:
     llava_agent = None

+def rotate_anti_90(image_array):
+    if image_array is None:
+        raise gr.Error("Please provide an image to rotate.")
+
+    image_pil = Image.fromarray(image_array)
+    image_pil = Image.rotate(image_pil, -90)
+    image_array = np.array(image_pil)
+    return image_array
+
+def rotate_90(image_array):
+    if image_array is None:
+        raise gr.Error("Please provide an image to rotate.")
+
+    image_pil = Image.fromarray(image_array)
+    image_pil = Image.rotate(image_pil, 90)
+    image_array = np.array(image_pil)
+    return image_array
+
 def update_seed(is_randomize_seed, seed):
     if is_randomize_seed:
         return random.randint(0, 2147483647)
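Note on the two helpers above: `rotate` is a method of `PIL.Image.Image` instances, so `Image.rotate(image_pil, ±90)` calls it through the `PIL.Image` module rather than on the image object, and PIL's `rotate` keeps the original canvas size unless `expand=True` is passed, which crops non-square images. A minimal alternative sketch, not part of this commit and with a hypothetical `rotate_image` name, that rotates the numpy array directly:

```python
import numpy as np
import gradio as gr

def rotate_image(image_array, k):
    # Hypothetical helper (not in the commit): rotate a numpy image by k
    # quarter-turns. np.rot90 is lossless and never crops: k=1 turns the
    # image 90° counter-clockwise, k=-1 turns it 90° clockwise (the same
    # direction as PIL's rotate(-90)).
    if image_array is None:
        raise gr.Error("Please provide an image to rotate.")
    # np.rot90 returns a strided view; make it contiguous for downstream
    # consumers such as PIL.Image.fromarray.
    return np.ascontiguousarray(np.rot90(image_array, k=k))

# Example: rotate_image(np.zeros((2, 3, 3), dtype=np.uint8), k=1).shape == (3, 2, 3)
```

Because it works on the array Gradio already provides, no `fromarray` / `np.array` round-trip is needed.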
@@ -390,7 +408,7 @@ def restore(
     hours = math.floor(minutes / 60)
     minutes = minutes - (hours * 60)
     information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
-        "Wait " + str(allocation) + " min before a new run to avoid quota penalty. " + \
+        "Wait " + str(allocation) + " min before a new run to avoid quota penalty or use another computer. " + \
         "The image(s) has(ve) been generated in " + \
         ((str(hours) + " h, ") if hours != 0 else "") + \
         ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
@@ -503,8 +521,10 @@ with gr.Blocks(title="SUPIR") as interface:
     gr.HTML(title_html)

     input_image = gr.Image(label="Input", show_label=True, type="numpy", height=600, elem_id="image-input")
+    rotate_anti_90_button = gr.Button(value="⤴ Rotate -90°", elem_id="rotate_anti_90_button")
+    rotate_90_button = gr.Button(value="⤵ Rotate +90°", elem_id="rotate_90_button")
     with gr.Group():
-        prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible; I advise you to write in English as other languages may not be handled", value="", placeholder="A
+        prompt = gr.Textbox(label="Image description", info="Help the AI understand what the image represents; describe as much as possible, especially the details we can't see on the original image; I advise you to write in English as other languages may not be handled", value="", placeholder="A 27 years old woman, walking, in Santiago, morning, Summer, photorealistic", lines=3)
         prompt_hint = gr.HTML("You can use a <a href='https://huggingface.co/spaces/MaziyarPanahi/llava-llama-3-8b'>LlaVa space</a> to auto-generate the description of your image.")
         upscale = gr.Radio([["x1", 1], ["x2", 2], ["x3", 3], ["x4", 4], ["x5", 5], ["x6", 6], ["x7", 7], ["x8", 8]], label="Upscale factor", info="Resolution x1 to x8", value=2, interactive=True)
         allocation = gr.Radio([["1 min", 1], ["2 min", 2], ["3 min", 3], ["4 min", 4], ["5 min", 5], ["6 min", 6], ["7 min", 7], ["8 min", 8], ["9 min", 9], ["10 min", 10]], label="GPU allocation time", info="lower=May abort run, higher=Quota penalty for next runs", value=6, interactive=True)
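The `upscale` and `allocation` radios pass `[label, value]` pairs as choices, so the wired function receives the numeric value rather than the displayed label. A standalone sketch of that behaviour, assuming a Gradio version that accepts (label, value) choices as this demo does; `apply_upscale` is a made-up callback:

```python
import gradio as gr

def apply_upscale(factor):
    # factor is the numeric value bound to the selected label, e.g. 2 for "x2".
    return f"Will upscale by a factor of {factor}"

with gr.Blocks() as demo:
    upscale = gr.Radio([("x1", 1), ("x2", 2), ("x4", 4)], label="Upscale factor", value=2)
    status = gr.Textbox(label="Status")
    upscale.change(fn=apply_upscale, inputs=[upscale], outputs=[status])

demo.launch()
```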
@@ -591,6 +611,7 @@ with gr.Blocks(title="SUPIR") as interface:
     event_id = gr.Textbox(label="Event ID", value="", visible=False)

     gr.Examples(
+        run_on_click = True,
         fn = stage2_process,
         inputs = [
             input_image,
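`run_on_click = True` makes clicking an example actually run `fn` on that example's inputs instead of only filling the input components (it applies when examples are not cached). A self-contained sketch of the same pattern with made-up components rather than the demo's `stage2_process` pipeline:

```python
import gradio as gr

def greet(name):
    return f"Hello, {name}!"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    greeting = gr.Textbox(label="Greeting")
    gr.Examples(
        examples=[["Ada"], ["Grace"]],
        fn=greet,
        inputs=[name],
        outputs=[greeting],
        run_on_click=True,  # clicking an example runs greet, not just fills the textbox
        cache_examples=False,
    )

demo.launch()
```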
@@ -696,6 +717,22 @@ with gr.Blocks(title="SUPIR") as interface:
     with gr.Row():
         gr.Markdown(claim_md)

+    input_image.upload(fn = check, inputs = [
+        input_image
+    ], outputs = [], queue = False, show_progress = False)
+
+    rotate_anti_90_button.click(fn = rotate_anti_90, inputs = [
+        input_image
+    ], outputs = [
+        input_image
+    ], queue = False, show_progress = False)
+
+    rotate_90_button.click(fn = rotate_90, inputs = [
+        input_image
+    ], outputs = [
+        input_image
+    ], queue = False, show_progress = False)
+
     denoise_button.click(fn = check, inputs = [
         input_image
     ], outputs = [], queue = False, show_progress = False).success(fn = stage1_process, inputs = [

@@ -724,10 +761,7 @@ with gr.Blocks(title="SUPIR") as interface:
         seed
     ], queue = False, show_progress = False).then(fn = check, inputs = [
         input_image
-    ], outputs = [], queue = False, show_progress = False).success(fn
-        fb_score,
-        fb_text
-    ], queue = False, show_progress = False).success(fn=stage2_process, inputs = [
+    ], outputs = [], queue = False, show_progress = False).success(fn=stage2_process, inputs = [
         input_image,
         denoise_image,
         prompt,
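Taken together, the wiring pattern is: the `gr.Image` component is both the input and the output of each rotate button's `.click`, so the rotated array replaces the displayed image in place. A self-contained sketch of that pattern, independent of the SUPIR demo and using the instance-method form `image_pil.rotate(angle, expand=True)`:

```python
import gradio as gr
import numpy as np
from PIL import Image

def rotate(image_array, degrees):
    # Rotate the displayed numpy image; positive angles are counter-clockwise
    # in PIL, and expand=True grows the canvas so non-square images are not cropped.
    if image_array is None:
        raise gr.Error("Please provide an image to rotate.")
    image_pil = Image.fromarray(image_array)
    return np.array(image_pil.rotate(degrees, expand=True))

with gr.Blocks() as demo:
    image = gr.Image(label="Input", type="numpy")
    with gr.Row():
        minus_90 = gr.Button("⤴ Rotate -90°")
        plus_90 = gr.Button("⤵ Rotate +90°")
    # The image component is both the input and the output, so each click
    # replaces the displayed image with its rotated version.
    minus_90.click(fn=lambda img: rotate(img, -90), inputs=[image], outputs=[image], queue=False)
    plus_90.click(fn=lambda img: rotate(img, 90), inputs=[image], outputs=[image], queue=False)

demo.launch()
```

As in the commit, `queue=False` keeps these lightweight UI updates out of the event queue.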