ifkash committed · verified
Commit 5d32910 · 1 Parent(s): 4701203

Update README.md

Files changed (1):
  1. README.md +44 -11
README.md CHANGED
@@ -63,8 +63,8 @@ print(f"Validation tokens: {len(val_data):,}")
  from transformers import PreTrainedTokenizerFast
 
  tokenizer = PreTrainedTokenizerFast.from_pretrained(
- "ifkash/fineweb-6b",
- subfolder="tokenized"
+     "ifkash/fineweb-6b",
+     subfolder="tokenized"
  )
 
  # Example usage
@@ -84,6 +84,39 @@ print(f"Decoded: {tokenizer.decode(tokens)}")
  - **`tokenized/tokenizer.json`**: Tokenizer vocabulary and merges
  - **`tokenized/tokenizer_config.json`**: Tokenizer configuration
  - **`tokenized/special_tokens_map.json`**: Special tokens mapping
+ - **`distillation/`**: Knowledge distillation data (see below)
+
+ ### Distillation Data
+
+ The `distillation/` directory contains precomputed teacher logits from [SmolLM2-360M](https://huggingface.co/HuggingFaceTB/SmolLM2-360M) for knowledge distillation:
+
+ | File | Description | Size (6B tokens) |
+ |------|-------------|------------------|
+ | `metadata.json` | Configuration and vocab info | ~1 KB |
+ | `train_tokens.bin` | Token IDs (uint16) | ~11.2 GB |
+ | `train_topk_ids.bin` | Top-128 token indices | ~1.4 GB |
+ | `train_topk_probs.bin` | Top-128 probabilities (float16) | ~1.4 GB |
+ | `val_tokens.bin` | Validation token IDs | ~56 MB |
+ | `val_topk_ids.bin` | Validation top-128 indices | ~7 MB |
+ | `val_topk_probs.bin` | Validation top-128 probs | ~7 MB |
+
+ **Loading distillation data:**
+ ```python
+ import numpy as np
+ import json
+
+ # Load metadata
+ with open("distillation/metadata.json") as f:
+     metadata = json.load(f)
+
+ # Load memory-mapped files
+ tokens = np.memmap("distillation/train_tokens.bin", dtype=np.uint16, mode="r")
+ topk_ids = np.memmap("distillation/train_topk_ids.bin", dtype=np.uint16, mode="r").reshape(-1, 128)
+ topk_probs = np.memmap("distillation/train_topk_probs.bin", dtype=np.float16, mode="r").reshape(-1, 128)
+
+ print(f"Tokens: {len(tokens):,}")
+ print(f"Teacher model: {metadata['teacher_model']}")
+ ```
 
  ### Data Fields
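The loading snippet above shows how to read the teacher's top-128 targets, but not how a student model would consume them. Below is a minimal sketch of one option, a soft-target cross-entropy restricted to the teacher's top-K support; the function name, tensor shapes, and `temperature` argument are illustrative assumptions rather than anything shipped with the dataset.

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, topk_ids, topk_probs, temperature=1.0):
    """Soft-target cross-entropy against the teacher's sparse top-128 distribution.

    student_logits: (batch, seq, vocab) raw student outputs
    topk_ids:       (batch, seq, 128)   teacher top-128 token indices
    topk_probs:     (batch, seq, 128)   teacher top-128 probabilities
    """
    log_probs = F.log_softmax(student_logits / temperature, dim=-1)
    # Pick out the student's log-probabilities at the teacher's top-128 token ids
    student_topk = torch.gather(log_probs, dim=-1, index=topk_ids.long())
    # Renormalize the truncated teacher distribution so each position sums to 1
    teacher = topk_probs.float()
    teacher = teacher / teacher.sum(dim=-1, keepdim=True)
    # Average soft cross-entropy over all positions
    return -(teacher * student_topk).sum(dim=-1).mean()
```

Renormalizing the truncated distribution simply discards whatever probability mass fell outside the stored top-128, which is the trade-off that makes a 128-entries-per-position format practical.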
 
 
@@ -104,18 +137,18 @@ import numpy as np
  import torch
 
  def get_batch(split='train', batch_size=64, block_size=2048):
- data = np.memmap(f'tokenized/{split}.bin', dtype=np.uint16, mode='r')
- ix = torch.randint(len(data) - block_size, (batch_size,))
- x = torch.stack([torch.from_numpy(data[i:i+block_size].astype(np.int64)) for i in ix])
- y = torch.stack([torch.from_numpy(data[i+1:i+1+block_size].astype(np.int64)) for i in ix])
- return x.cuda(), y.cuda()
+     data = np.memmap(f'tokenized/{split}.bin', dtype=np.uint16, mode='r')
+     ix = torch.randint(len(data) - block_size, (batch_size,))
+     x = torch.stack([torch.from_numpy(data[i:i+block_size].astype(np.int64)) for i in ix])
+     y = torch.stack([torch.from_numpy(data[i+1:i+1+block_size].astype(np.int64)) for i in ix])
+     return x.cuda(), y.cuda()
 
  # Training loop
  for step in range(num_steps):
- x, y = get_batch('train')
- logits, loss = model(x, y)
- loss.backward()
- optimizer.step()
+     x, y = get_batch('train')
+     logits, loss = model(x, y)
+     loss.backward()
+     optimizer.step()
  ```
 
  ## Tokenizer Details
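
For completeness, here is one way the distillation term could slot into the plain language-modeling loop from the last hunk. The helper name, the `alpha` mixing weight, and the assumption that teacher targets arrive in batches already aligned with `x` are hypothetical; only `model(x, y)` returning `(logits, loss)` follows the README's convention.

```python
import torch
import torch.nn.functional as F

def train_step(model, optimizer, x, y, topk_ids, topk_probs, alpha=0.5):
    """One optimizer step mixing hard-target LM loss with the teacher's soft targets."""
    logits, _ = model(x, y)  # (batch, seq, vocab), as in the README's training loop
    # Ordinary next-token cross-entropy on the hard targets
    lm_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), y.view(-1))
    # Soft-target term restricted to the teacher's top-128 support (renormalized)
    log_probs = F.log_softmax(logits, dim=-1)
    student_topk = torch.gather(log_probs, dim=-1, index=topk_ids.long())
    teacher = topk_probs.float()
    teacher = teacher / teacher.sum(dim=-1, keepdim=True)
    kd_loss = -(teacher * student_topk).sum(dim=-1).mean()
    # Blend, backpropagate, and step
    loss = alpha * lm_loss + (1 - alpha) * kd_loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```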