quazim committed on
Commit f6715a3 · verified · 1 Parent(s): 42d5de5

Upload config

Files changed (1): config.json (+6 −5)
config.json CHANGED
@@ -6,7 +6,10 @@
     "WhisperForConditionalGeneration"
   ],
   "attention_dropout": 0.0,
- "begin_suppress_tokens": null,
+ "begin_suppress_tokens": [
+   220,
+   50257
+ ],
   "bos_token_id": 50257,
   "classifier_proj_size": 256,
   "d_model": 1280,
@@ -16,13 +19,11 @@
   "decoder_layers": 32,
   "decoder_start_token_id": 50258,
   "dropout": 0.0,
- "dtype": "float32",
   "encoder_attention_heads": 20,
   "encoder_ffn_dim": 5120,
   "encoder_layerdrop": 0.0,
   "encoder_layers": 32,
   "eos_token_id": 50257,
- "forced_decoder_ids": null,
   "init_std": 0.02,
   "is_encoder_decoder": true,
   "mask_feature_length": 10,
@@ -31,7 +32,7 @@
   "mask_time_length": 10,
   "mask_time_min_masks": 2,
   "mask_time_prob": 0.05,
- "max_length": null,
+ "max_length": 448,
   "max_source_positions": 1500,
   "max_target_positions": 448,
   "median_filter_width": 7,
@@ -40,7 +41,7 @@
   "num_mel_bins": 128,
   "pad_token_id": 50256,
   "scale_embedding": false,
- "torch_dtype": "float32",
+ "torch_dtype": "float16",
   "transformers_version": "4.52.3",
   "use_cache": true,
   "use_weighted_layer_sum": false,