Sayoyo committed on
Commit
9b0e640
·
1 Parent(s): baf2271

feat: huggingface space app

Browse files
Files changed (2) hide show
  1. app.py +51 -0
  2. requirements.txt +8 -4
app.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
ACE-Step v1.5 - HuggingFace Space Entry Point

This file serves as the entry point for HuggingFace Space deployment.
It imports and uses the existing v1.5 Gradio implementation without modification.
"""
import os
import sys

# Ensure the project root (the directory containing this file) is importable.
# HuggingFace Spaces place app.py at the repository root, so a single
# dirname() yields the project root. The previous double dirname() inserted
# the *parent* of the project root, which does not contain the `acestep`
# package and made the path insertion a no-op for imports.
project_root = os.path.dirname(os.path.abspath(__file__))
if project_root not in sys.path:  # avoid duplicate sys.path entries on re-exec
    sys.path.insert(0, project_root)

# Disable Gradio analytics (avoids outbound telemetry calls from the Space).
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"

# Clear proxy settings that may interfere with Gradio's local HTTP server.
for proxy_var in ['http_proxy', 'https_proxy', 'HTTP_PROXY', 'HTTPS_PROXY', 'ALL_PROXY']:
    os.environ.pop(proxy_var, None)

# Imported after the sys.path fix so the local package resolves.
from acestep.acestep_v15_pipeline import create_demo
22
+
23
+
24
def main():
    """Build the v1.5 Gradio demo and launch it for the HuggingFace Space.

    Configures lazy initialization and service mode, enables the request
    queue for multi-user support, and binds the server to port 7860 (the
    port HuggingFace Spaces routes traffic to).
    """
    # Space-specific options forwarded to the existing v1.5 pipeline.
    space_config = {
        'pre_initialized': False,            # defer heavy model loading until first use
        'service_mode': True,                # run as a shared multi-user service
        'language': 'en',
        'persistent_storage_path': '/data',  # HuggingFace Space persistent storage mount
    }

    demo = create_demo(init_params=space_config, language='en')

    # Queue incoming requests so concurrent users are served in order.
    demo.queue(max_size=20)

    # Bind to all interfaces on the Space's expected port; surface errors
    # in the UI rather than failing silently.
    launch_options = dict(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True,
    )
    demo.launch(**launch_options)


if __name__ == "__main__":
    main()
requirements.txt CHANGED
@@ -1,4 +1,3 @@
1
-
2
  # PyTorch with CUDA 12.8 (for Windows/Linux)
3
  --extra-index-url https://download.pytorch.org/whl/cu128
4
  torch==2.7.1; sys_platform == 'win32'
@@ -11,7 +10,7 @@ torchvision; sys_platform != 'win32'
11
  # Core dependencies
12
  transformers>=4.51.0
13
  diffusers
14
- gradio
15
  matplotlib>=3.7.5
16
  scipy>=1.10.1
17
  soundfile>=0.13.1
@@ -19,6 +18,7 @@ loguru>=0.7.3
19
  einops>=0.8.1
20
  accelerate>=1.12.0
21
  fastapi>=0.110.0
 
22
  uvicorn[standard]>=0.27.0
23
  numba>=0.63.1
24
  vector-quantize-pytorch>=1.27.15
@@ -35,5 +35,9 @@ flash-attn @ https://github.com/sdbds/flash-attention-for-windows/releases/downl
35
  flash-attn; sys_platform != 'win32'
36
  xxhash
37
 
38
- # Local package - install with: pip install -e acestep/third_parts/nano-vllm
39
- # nano-vllm
 
 
 
 
 
 
1
  # PyTorch with CUDA 12.8 (for Windows/Linux)
2
  --extra-index-url https://download.pytorch.org/whl/cu128
3
  torch==2.7.1; sys_platform == 'win32'
 
10
  # Core dependencies
11
  transformers>=4.51.0
12
  diffusers
13
+ gradio>=5.0.0
14
  matplotlib>=3.7.5
15
  scipy>=1.10.1
16
  soundfile>=0.13.1
 
18
  einops>=0.8.1
19
  accelerate>=1.12.0
20
  fastapi>=0.110.0
21
+ diskcache
22
  uvicorn[standard]>=0.27.0
23
  numba>=0.63.1
24
  vector-quantize-pytorch>=1.27.15
 
35
  flash-attn; sys_platform != 'win32'
36
  xxhash
37
 
38
+ # Required for HuggingFace Space deployment
39
+ spaces
40
+ huggingface_hub>=0.20.0
41
+
42
+ # Local nano-vllm package
43
+ -e acestep/third_parts/nano-vllm