Sayoyo committed on
Commit
26b4474
·
1 Parent(s): 875a989

feat: windows support

Browse files
acestep/third_parts/nano-vllm/nanovllm/engine/model_runner.py CHANGED
@@ -3,6 +3,7 @@ import torch
3
  import torch.distributed as dist
4
  from multiprocessing.synchronize import Event
5
  from multiprocessing.shared_memory import SharedMemory
 
6
 
7
  from nanovllm.config import Config
8
  from nanovllm.engine.sequence import Sequence
@@ -55,7 +56,9 @@ class ModelRunner:
55
  self.event = event
56
  dist_port = find_available_port()
57
  print(f"[debug]dist_port: {dist_port}")
58
- dist.init_process_group("nccl", f"tcp://localhost:{dist_port}", world_size=self.world_size, rank=rank)
 
 
59
  torch.cuda.set_device(rank)
60
  default_dtype = torch.get_default_dtype()
61
  # Use dtype instead of deprecated torch_dtype
 
3
  import torch.distributed as dist
4
  from multiprocessing.synchronize import Event
5
  from multiprocessing.shared_memory import SharedMemory
6
+ import sys
7
 
8
  from nanovllm.config import Config
9
  from nanovllm.engine.sequence import Sequence
 
56
  self.event = event
57
  dist_port = find_available_port()
58
  print(f"[debug]dist_port: {dist_port}")
59
+ # Use gloo backend on Windows, nccl on Linux/other platforms
60
+ backend = "gloo" if sys.platform == "win32" else "nccl"
61
+ dist.init_process_group(backend, f"tcp://localhost:{dist_port}", world_size=self.world_size, rank=rank)
62
  torch.cuda.set_device(rank)
63
  default_dtype = torch.get_default_dtype()
64
  # Use dtype instead of deprecated torch_dtype
acestep/third_parts/nano-vllm/pyproject.toml CHANGED
@@ -6,14 +6,14 @@ build-backend = "setuptools.build_meta"
6
  name = "nano-vllm"
7
  version = "0.2.0"
8
  authors = [{ name = "Xingkai Yu" }]
9
- license = "MIT"
10
- license-files = ["LICENSE"]
11
  readme = "README.md"
12
  description = "a lightweight vLLM implementation built from scratch"
13
  requires-python = ">=3.10,<3.13"
14
  dependencies = [
15
  "torch>=2.4.0",
16
- "triton>=3.0.0",
 
17
  "transformers>=4.51.0",
18
  "flash-attn",
19
  "xxhash",
 
6
  name = "nano-vllm"
7
  version = "0.2.0"
8
  authors = [{ name = "Xingkai Yu" }]
9
+ license = {text = "MIT"}
 
10
  readme = "README.md"
11
  description = "a lightweight vLLM implementation built from scratch"
12
  requires-python = ">=3.10,<3.13"
13
  dependencies = [
14
  "torch>=2.4.0",
15
+ "triton-windows>=3.0.0; sys_platform == 'win32'",
16
+ "triton>=3.0.0; sys_platform != 'win32'",
17
  "transformers>=4.51.0",
18
  "flash-attn",
19
  "xxhash",
pyproject.toml CHANGED
@@ -3,7 +3,7 @@ name = "ace-step"
3
  version = "1.5.0"
4
  description = "ACE-Step 1.5"
5
  readme = "README.md"
6
- requires-python = ">=3.12,<3.13"
7
  license = {text = "Apache-2.0"}
8
  dependencies = [
9
  # PyTorch for Linux/Windows with CUDA
 
3
  version = "1.5.0"
4
  description = "ACE-Step 1.5"
5
  readme = "README.md"
6
+ requires-python = ">=3.11, <3.12"
7
  license = {text = "Apache-2.0"}
8
  dependencies = [
9
  # PyTorch for Linux/Windows with CUDA