# PyTorch with CUDA 12.8 (for Windows/Linux)
--extra-index-url https://download.pytorch.org/whl/cu128
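# pip reads the option above directly from this file and consults the
# PyTorch CUDA 12.8 wheel index in addition to PyPI when resolving the
# torch packages below.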
torch==2.7.1; sys_platform == 'win32'
torchaudio==2.7.1; sys_platform == 'win32'
torchvision; sys_platform == 'win32'
torch>=2.9.1; sys_platform != 'win32'
torchaudio>=2.9.1; sys_platform != 'win32'
torchvision; sys_platform != 'win32'
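# The environment markers (PEP 508) above select wheels per platform:
# the sys_platform == 'win32' lines apply on Windows, the != 'win32'
# lines on Linux/macOS.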

# Core dependencies
transformers>=4.51.0,<4.58.0
diffusers
gradio==6.2.0
matplotlib>=3.7.5
scipy>=1.10.1
soundfile>=0.13.1
loguru>=0.7.3
einops>=0.8.1
accelerate>=1.12.0
fastapi>=0.110.0
diskcache
uvicorn[standard]>=0.27.0
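# uvicorn[standard] additionally pulls in uvicorn's optional performance
# extras (e.g. uvloop, httptools, websockets).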
numba>=0.63.1
vector-quantize-pytorch>=1.27.15
# torchcodec>=0.9.1  # Disabled: causes CUDA dependency issues on HuggingFace Space

# LoRA Training dependencies (optional)
peft>=0.7.0
lightning>=2.0.0

# nano-vllm dependencies
triton-windows>=3.0.0,<3.4; sys_platform == 'win32'
triton>=3.0.0; sys_platform != 'win32'
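# Upstream Triton does not publish official Windows wheels, hence the
# community-built triton-windows package on that platform.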
flash-attn @ https://github.com/sdbds/flash-attention-for-windows/releases/download/2.8.2/flash_attn-2.8.2+cu128torch2.7.1cxx11abiFALSEfullbackward-cp311-cp311-win_amd64.whl ; sys_platform == 'win32' and python_version == '3.11' and platform_machine == 'AMD64'
flash-attn @ https://github.com/mjun0812/flash-attention-prebuild-wheels/releases/download/v0.7.12/flash_attn-2.8.3+cu128torch2.10-cp311-cp311-linux_x86_64.whl ; sys_platform == 'linux' and python_version == '3.11'
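# The direct-URL flash-attn wheels above only match Python 3.11 (see
# their markers); on other interpreter versions pip skips these lines,
# and flash-attn would have to be built from source or installed
# separately if it is needed.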
xxhash

# HuggingFace Space required
spaces
huggingface_hub>=0.20.0
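
# Install with:
#   pip install -r requirements.txt
# (the --extra-index-url declared at the top is picked up automatically
# from this file)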