@spaces.GPU
gradio_demo.py   CHANGED   +5 -1
@@ -13,6 +13,7 @@ from CKPT_PTH import LLAVA_MODEL_PATH
 import einops
 import copy
 import time
+import spaces
 from huggingface_hub import hf_hub_download
 
 hf_hub_download(repo_id="laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", filename="open_clip_pytorch_model.bin", local_dir="laion_CLIP-ViT-bigG-14-laion2B-39B-b160k")
@@ -66,6 +67,7 @@ if torch.cuda.device_count() > 0:
 else:
     llava_agent = None
 
+@spaces.GPU
 def stage1_process(input_image, gamma_correction):
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
@@ -85,6 +87,7 @@ def stage1_process(input_image, gamma_correction):
     LQ = LQ.round().clip(0, 255).astype(np.uint8)
     return LQ
 
+@spaces.GPU
 def llave_process(input_image, temperature, top_p, qs=None):
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
@@ -98,6 +101,7 @@ def llave_process(input_image, temperature, top_p, qs=None):
         captions = ['LLaVA is not available. Please add text manually.']
     return captions[0]
 
+@spaces.GPU
 def stage2_process(input_image, prompt, a_prompt, n_prompt, num_samples, upscale, edm_steps, s_stage1, s_stage2,
                    s_cfg, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction,
                    linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select):
@@ -161,7 +165,7 @@ def stage2_process(input_image, prompt, a_prompt, n_prompt, num_samples, upscale
         Image.fromarray(result).save(f'./history/{event_id[:5]}/{event_id[5:]}/HQ_{i}.png')
     return [input_image] + results, event_id, 3, ''
 
-
+@spaces.GPU
 def load_and_reset(param_setting):
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')