CL19 committed
Commit fe73971 · verified · 1 Parent(s): 0e8f71b

Upload config.yaml with huggingface_hub
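For reference, an upload like this can be reproduced with the `huggingface_hub` client. A minimal sketch follows; the `repo_id` below is a placeholder, not taken from this commit:

```python
# Minimal sketch of uploading a config file with huggingface_hub.
# The repo_id is a placeholder; substitute the actual target repo.
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default
api.upload_file(
    path_or_fileobj="config.yaml",       # local file to upload
    path_in_repo="config.yaml",          # destination path inside the repo
    repo_id="your-username/your-model",  # placeholder repo id
    repo_type="model",
    commit_message="Upload config.yaml with huggingface_hub",
)
```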

Files changed (1)
  1. config.yaml  +147 -0
config.yaml ADDED
@@ -0,0 +1,147 @@
+run_name: oa-hh-sft-1b
+seed: 6198
+epoch: null
+dry_run: false
+model:
+  d_model: 2048
+  n_heads: 16
+  n_kv_heads: null
+  clip_qkv: null
+  n_layers: 16
+  mlp_ratio: 8
+  mlp_hidden_size: null
+  activation_type: swiglu
+  block_type: sequential
+  block_group_size: 1
+  alibi: false
+  alibi_bias_max: 8.0
+  rope: true
+  rope_full_precision: true
+  flash_attention: true
+  attention_dropout: 0.0
+  multi_query_attention: false
+  attention_layer_norm: false
+  residual_dropout: 0.0
+  embedding_dropout: 0.0
+  layer_norm_type: default
+  layer_norm_with_affine: false
+  attention_layer_norm_with_affine: false
+  max_sequence_length: 2048
+  include_bias: false
+  bias_for_layer_norm: false
+  scale_logits: false
+  vocab_size: 50280
+  embedding_size: 50304
+  weight_tying: true
+  eos_token_id: 50279
+  pad_token_id: 1
+  init_device: meta
+  init_fn: mitchell
+  init_std: 0.02
+  init_cutoff_factor: null
+  precision: amp_bf16
+optimizer:
+  name: adamw
+  learning_rate: 2.0e-05
+  weight_decay: 0.1
+  betas:
+  - 0.9
+  - 0.95
+  no_decay_norm_and_bias: null
+  decay_norm_and_bias: false
+  decay_embeddings: false
+  metrics_log_interval: 10
+scheduler:
+  name: linear_with_warmup
+  units: steps
+  t_warmup: 200
+  t_max: null
+  alpha_f: 0.001
+  grad_clip_warmup_steps: null
+  grad_clip_warmup_factor: null
+data:
+  paths:
+  - data/oa-hh/input_ids.npy
+  datasets: null
+  label_mask_paths:
+  - data/oa-hh/label_mask.npy
+  pad_direction: right
+  generate_attention_mask: true
+  num_workers: 0
+  drop_last: true
+  pin_memory: true
+  prefetch_factor: 16
+  persistent_workers: true
+  timeout: 0
+  seed: null
+restore_dataloader: true
+fast_forward_batches: null
+evaluators: []
+eval_interval: 1000
+tokenizer:
+  identifier: allenai/gpt-neox-olmo-dolma-v1_5
+  truncate_direction: right
+save_folder: /data/chloeloughridge/git/pretraining-poisoning/models/clean/1B-20B/step4625-unsharded-sft
+remote_save_folder: null
+canceled_check_interval: 50
+save_interval: 10000
+save_interval_unsharded: 10000
+save_interval_ephemeral: null
+save_num_checkpoints_to_keep: -1
+save_num_unsharded_checkpoints_to_keep: -1
+save_overwrite: true
+force_save_unsharded: false
+no_pre_train_checkpoint: true
+load_path: /data/chloeloughridge/git/pretraining-poisoning/models/clean/1B-20B/step4625-unsharded
+load_path_sharded_checkpointer: null
+reset_optimizer_state: true
+reset_trainer_state: true
+sharded_checkpointer: torch_legacy
+new_style_checkpoints: null
+max_duration: 3ep
+global_train_batch_size: 128
+device_train_batch_size: 16
+device_train_microbatch_size: 8
+device_eval_batch_size: 8
+eval_subset_num_batches: -1
+eval_on_load: false
+device_train_grad_accum: 2
+max_grad_norm: 1.0
+max_grad_norm_ratio: null
+precision: amp_bf16
+wandb:
+  project: pretraining-poisoning
+  entity: chloe-loughridge
+  group: null
+  name: oa-hh-sft-1b
+  tags:
+  - sft
+  - 1B
+  log_artifacts: false
+  rank_zero_only: true
+  log_interval: 10
+speed_monitor:
+  window_size: 20
+  gpu_flops_available: null
+console_log_interval: 1
+gen1_gc_interval: 1
+compile:
+  mode: default
+  fullgraph: false
+  backend: inductor
+fsdp:
+  use_orig_params: true
+  sharding_strategy: FULL_SHARD
+  wrapping_strategy: by_block
+  precision: pure
+softmax_auxiliary_loss: false
+time_limit: 171000.0
+extra_steps_after_cancel: 10
+early_stopping_factor: null
+save_data_indices: true
+python_profiling: false
+torch_profiling: false
+stop_at: null
+stop_after: null
+activation_checkpointing: null
+fused_loss: null
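The batch-size fields are internally consistent: `device_train_grad_accum` (2) equals `device_train_batch_size` (16) divided by `device_train_microbatch_size` (8), and 128 / 16 = 8 implies the run expects 8 devices. A small sketch that loads the file and re-derives these values, assuming PyYAML is installed and config.yaml is in the working directory:

```python
# Sketch: load the config and re-derive the batch-size relationships.
# Assumes PyYAML (`pip install pyyaml`); not part of the original commit.
import yaml

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

global_bs = cfg["global_train_batch_size"]      # 128
device_bs = cfg["device_train_batch_size"]      # 16
micro_bs = cfg["device_train_microbatch_size"]  # 8

num_devices = global_bs // device_bs  # 128 // 16 = 8 GPUs
grad_accum = device_bs // micro_bs    # 16 // 8 = 2 accumulation steps

assert grad_accum == cfg["device_train_grad_accum"]
print(f"devices: {num_devices}, grad accumulation steps: {grad_accum}")
```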