prithivMLmods committed 9426a65 · verified · 1 parent: 37d78ab

update app

Files changed (1):
  1. app.py +25 -40
app.py CHANGED
@@ -105,20 +105,20 @@ pipe.fuse_lora(adapter_names=["lightning"], lora_scale=1.0)
 
 print("Loading Task Adapters...")
 
-pipe.load_lora_weights("tarn59/apply_texture_qwen_image_edit_2509",
-                       weight_name="apply_texture_v2_qwen_image_edit_2509.safetensors",
+pipe.load_lora_weights("tarn59/apply_texture_qwen_image_edit_2509",
+                       weight_name="apply_texture_v2_qwen_image_edit_2509.safetensors",
                        adapter_name="texture")
 
-pipe.load_lora_weights("ostris/qwen_image_edit_inpainting",
-                       weight_name="qwen_image_edit_inpainting.safetensors",
+pipe.load_lora_weights("ostris/qwen_image_edit_inpainting",
+                       weight_name="qwen_image_edit_inpainting.safetensors",
                        adapter_name="fusion")
 
-pipe.load_lora_weights("ostris/qwen_image_edit_2509_shirt_design",
-                       weight_name="qwen_image_edit_2509_shirt_design.safetensors",
+pipe.load_lora_weights("ostris/qwen_image_edit_2509_shirt_design",
+                       weight_name="qwen_image_edit_2509_shirt_design.safetensors",
                        adapter_name="shirt_design")
 
-pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Fusion",
-                       weight_name="溶图.safetensors",
+pipe.load_lora_weights("dx8152/Qwen-Image-Edit-2509-Fusion",
+                       weight_name="溶图.safetensors",
                        adapter_name="fusion-x")
 
 try:
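Note: each `load_lora_weights` call registers its weights under a distinct `adapter_name` on the same pipeline, so a later `set_adapters` call can switch tasks per request. A minimal sketch of the pattern, assuming a diffusers pipeline with PEFT support; the base repo id and dtype below are illustrative, not taken from this commit:

```python
import torch
from diffusers import DiffusionPipeline

# Illustrative base model; the Space builds its actual pipeline earlier in app.py.
pipe = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16
)

# Register one task LoRA under its own adapter_name...
pipe.load_lora_weights(
    "tarn59/apply_texture_qwen_image_edit_2509",
    weight_name="apply_texture_v2_qwen_image_edit_2509.safetensors",
    adapter_name="texture",
)

# ...then activate exactly one adapter (at full strength) per request.
pipe.set_adapters(["texture"], adapter_weights=[1.0])
```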
@@ -144,6 +144,7 @@ def update_dimensions_on_upload(image):
     aspect_ratio = original_width / original_height
     new_width = int(new_height * aspect_ratio)
 
+    # Ensure dimensions are multiples of 16
     new_width = (new_width // 16) * 16
     new_height = (new_height // 16) * 16
 
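Note: the comment added here documents the floor division below it: both dimensions are rounded down to the nearest multiple of 16 before being handed to the pipeline. A hypothetical standalone helper showing the same arithmetic:

```python
def snap_down(value: int, base: int = 16) -> int:
    """Round `value` down to the nearest multiple of `base`."""
    return (value // base) * base

# Floor division drops the remainder, so sizes never grow past the original.
assert snap_down(1023) == 1008  # 1023 // 16 == 63, and 63 * 16 == 1008
assert snap_down(1024) == 1024  # already a multiple of 16
```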
@@ -161,14 +162,9 @@ def infer(
     steps,
     progress=gr.Progress(track_tqdm=True)
 ):
-    if image_1 is None:
-        raise gr.Error("Please upload a base image.")
-
-    is_two_image_task = lora_adapter in ["Texture Edit", "Fuse-Objects", "Cloth-Design-Fuse", "Super-Fusion"]
-
-    if is_two_image_task and image_2 is None:
-        raise gr.Error("Please upload both images for this editing style.")
-
+    if image_1 is None or image_2 is None:
+        raise gr.Error("Please upload both images for Fusion/Texture/FaceSwap tasks.")
+
     if not prompt:
         if lora_adapter == "Cloth-Design-Fuse":
             prompt = "Put this design on their shirt."
@@ -178,9 +174,7 @@ def infer(
             prompt = "Fuse object into background."
         elif lora_adapter == "Super-Fusion":
             prompt = "Blend the product into the background, correct its perspective and lighting, and make it naturally integrated with the scene."
-        elif lora_adapter == "General Edit":
-            raise gr.Error("Please provide a prompt for General Edit.")
-
+
     adapters_map = {
         "Texture Edit": "texture",
         "Fuse-Objects": "fusion",
@@ -202,16 +196,12 @@
     negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
 
     img1_pil = image_1.convert("RGB")
-    images_to_process = [img1_pil]
-
-    if is_two_image_task:
-        img2_pil = image_2.convert("RGB")
-        images_to_process.append(img2_pil)
+    img2_pil = image_2.convert("RGB")
 
     width, height = update_dimensions_on_upload(img1_pil)
 
     result = pipe(
-        image=images_to_process,
+        image=[img1_pil, img2_pil],
         prompt=prompt,
         negative_prompt=negative_prompt,
         height=height,
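Note: with single-image ("General Edit") support removed, the pipeline now always receives the base and reference images together as a list, base first. A sketch of the equivalent standalone call; the pipeline class, step count, and file names here are assumptions, not taken from this commit:

```python
import torch
from PIL import Image
from diffusers import QwenImageEditPlusPipeline

pipe = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509", torch_dtype=torch.bfloat16
).to("cuda")

base = Image.open("base.png").convert("RGB")            # image being edited
reference = Image.open("reference.png").convert("RGB")  # texture/object source

result = pipe(
    image=[base, reference],  # order matters: base image first
    prompt="Apply the reference texture to the object.",
    true_cfg_scale=1.0,       # matches the app's "True Guidance Scale" default
    num_inference_steps=8,    # low step count to suit the fused Lightning LoRA
    generator=torch.Generator(device="cuda").manual_seed(0),
).images[0]
result.save("edited.png")
```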
@@ -225,18 +215,13 @@
 
 @spaces.GPU(duration=30)
 def infer_example(image_1, image_2, prompt, lora_adapter):
-    if image_1 is None:
+    if image_1 is None or image_2 is None:
         return None, 0
-
-    # For single-image examples, image_2 might be None
-    img1_converted = image_1.convert("RGB")
-    img2_converted = image_2.convert("RGB") if image_2 else None
-
     result, seed = infer(
-        img1_converted,
-        img2_converted,
-        prompt,
-        lora_adapter,
+        image_1.convert("RGB"),
+        image_2.convert("RGB"),
+        prompt,
+        lora_adapter,
         0,
         True,
         1.0,
@@ -261,17 +246,17 @@ with gr.Blocks(css=css, theme=orange_red_theme) as demo:
         with gr.Column(scale=1):
             with gr.Row():
                 image_1 = gr.Image(label="Base Image", type="pil", height=290)
-                image_2 = gr.Image(label="Reference Image (for 2-image edits)", type="pil", height=290)
+                image_2 = gr.Image(label="Reference Image", type="pil", height=290)
 
             prompt = gr.Text(
                 label="Edit Prompt",
                 show_label=True,
-                placeholder="e.g., A cat wearing sunglasses...",
+                placeholder="e.g., Apply wood texture to the mug...",
            )
 
             run_button = gr.Button("Edit Image", variant="primary")
 
-            with gr.Accordion("Advanced Settings", open=False):
+            with gr.Accordion("Advanced Settings", open=False, visible=False):
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                 guidance_scale = gr.Slider(label="True Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
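Note: `visible=False` hides the whole Advanced Settings accordion without removing its components, so `seed`, `randomize_seed`, and the sliders keep their default values and can still be wired into the click handler as inputs. A minimal runnable sketch of that behavior:

```python
import gradio as gr

with gr.Blocks() as demo:
    # A hidden component still holds a value and can feed event handlers;
    # visible=False only removes it from the rendered page.
    with gr.Accordion("Advanced Settings", open=False, visible=False):
        seed = gr.Slider(label="Seed", minimum=0, maximum=100, step=1, value=42)
    out = gr.Textbox(label="Chosen seed")
    btn = gr.Button("Run")
    btn.click(lambda s: f"seed={int(s)}", inputs=seed, outputs=out)

demo.launch()
```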
@@ -283,8 +268,8 @@ with gr.Blocks(css=css, theme=orange_red_theme) as demo:
             with gr.Row():
                 lora_adapter = gr.Dropdown(
                     label="Choose Editing Style",
-                    choices=["General Edit", "Texture Edit", "Cloth-Design-Fuse", "Fuse-Objects", "Super-Fusion"],
-                    value="General Edit",
+                    choices=["Texture Edit", "Cloth-Design-Fuse", "Fuse-Objects", "Super-Fusion"],
+                    value="Texture Edit",
                 )
 
     gr.Examples(
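Note: with "General Edit" gone, every remaining dropdown choice maps one-to-one onto a loaded adapter via the `adapters_map` shown in an earlier hunk. A hedged sketch of the selection step; the last two pairings and the 1.0 activation scale are inferred from the default prompts and adapter names, not shown in this diff:

```python
from typing import Mapping

# Adapter names follow the load_lora_weights calls in the first hunk.
ADAPTERS_MAP: Mapping[str, str] = {
    "Texture Edit": "texture",
    "Fuse-Objects": "fusion",
    "Cloth-Design-Fuse": "shirt_design",  # inferred pairing
    "Super-Fusion": "fusion-x",           # inferred pairing
}

def activate_style(pipe, style: str) -> None:
    """Switch the pipeline to the single task LoRA for the chosen style."""
    pipe.set_adapters([ADAPTERS_MAP[style]], adapter_weights=[1.0])
```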
 