haijunlv committed
Commit 0d382e3 · verified · 1 Parent(s): 4b114d9

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +1 -0
  2. added_tokens.json +53 -0
  3. chat_template.jinja +110 -0
  4. config.json +701 -0
  5. configuration_interns1_pro.py +175 -0
  6. generation_config.json +14 -0
  7. merges.txt +0 -0
  8. model-00005-of-00072.safetensors +3 -0
  9. model-00008-of-00072.safetensors +3 -0
  10. model-00009-of-00072.safetensors +3 -0
  11. model-00010-of-00072.safetensors +3 -0
  12. model-00012-of-00072.safetensors +3 -0
  13. model-00013-of-00072.safetensors +3 -0
  14. model-00015-of-00072.safetensors +3 -0
  15. model-00016-of-00072.safetensors +3 -0
  16. model-00017-of-00072.safetensors +3 -0
  17. model-00018-of-00072.safetensors +3 -0
  18. model-00020-of-00072.safetensors +3 -0
  19. model-00021-of-00072.safetensors +3 -0
  20. model-00022-of-00072.safetensors +3 -0
  21. model-00023-of-00072.safetensors +3 -0
  22. model-00025-of-00072.safetensors +3 -0
  23. model-00036-of-00072.safetensors +3 -0
  24. model-00045-of-00072.safetensors +3 -0
  25. model-00046-of-00072.safetensors +3 -0
  26. model-00047-of-00072.safetensors +3 -0
  27. model-00049-of-00072.safetensors +3 -0
  28. model-00057-of-00072.safetensors +3 -0
  29. model-00059-of-00072.safetensors +3 -0
  30. model-00061-of-00072.safetensors +3 -0
  31. model-00062-of-00072.safetensors +3 -0
  32. model-00063-of-00072.safetensors +3 -0
  33. model-00064-of-00072.safetensors +3 -0
  34. model-00072-of-00072.safetensors +3 -0
  35. model.safetensors.index.json +3 -0
  36. modeling_interns1_pro.py +1703 -0
  37. modeling_rope_utils.py +885 -0
  38. panda.jpg +0 -0
  39. preprocessor_config.json +23 -0
  40. processing_interns1_pro.py +311 -0
  41. special_tokens_map.json +38 -0
  42. test_inference.py +147 -0
  43. test_router_logits.py +95 -0
  44. tokenization_interns1.py +1007 -0
  45. tokenizer_PROT.model +3 -0
  46. tokenizer_SMILES.model +3 -0
  47. tokenizer_XNA.model +3 -0
  48. tokenizer_config.json +448 -0
  49. video_preprocessor_config.json +22 -0
  50. video_processing_interns1_pro.py +262 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,53 @@
1
+ {
2
+ "</SMILES>": 151687,
3
+ "</box>": 151677,
4
+ "</dna>": 151691,
5
+ "</img>": 151671,
6
+ "</protein>": 151689,
7
+ "</quad>": 151673,
8
+ "</ref>": 151675,
9
+ "</rna>": 151693,
10
+ "</think>": 151668,
11
+ "</tool_call>": 151658,
12
+ "</tool_response>": 151666,
13
+ "<IMG_CONTEXT>": 151669,
14
+ "<SMILES>": 151686,
15
+ "<TS_CONTEXT>": 151685,
16
+ "<box>": 151676,
17
+ "<dna>": 151690,
18
+ "<img>": 151670,
19
+ "<protein>": 151688,
20
+ "<quad>": 151672,
21
+ "<ref>": 151674,
22
+ "<rna>": 151692,
23
+ "<think>": 151667,
24
+ "<tool_call>": 151657,
25
+ "<tool_response>": 151665,
26
+ "<video>": 151682,
27
+ "<|/ts|>": 151684,
28
+ "<|action_end|>": 151679,
29
+ "<|action_start|>": 151678,
30
+ "<|box_end|>": 151649,
31
+ "<|box_start|>": 151648,
32
+ "<|endoftext|>": 151643,
33
+ "<|file_sep|>": 151664,
34
+ "<|fim_middle|>": 151660,
35
+ "<|fim_pad|>": 151662,
36
+ "<|fim_prefix|>": 151659,
37
+ "<|fim_suffix|>": 151661,
38
+ "<|im_end|>": 151645,
39
+ "<|im_start|>": 151644,
40
+ "<|image_pad|>": 151655,
41
+ "<|interpreter|>": 151680,
42
+ "<|object_ref_end|>": 151647,
43
+ "<|object_ref_start|>": 151646,
44
+ "<|plugin|>": 151681,
45
+ "<|quad_end|>": 151651,
46
+ "<|quad_start|>": 151650,
47
+ "<|repo_name|>": 151663,
48
+ "<|ts|>": 151683,
49
+ "<|video_pad|>": 151656,
50
+ "<|vision_end|>": 151653,
51
+ "<|vision_pad|>": 151654,
52
+ "<|vision_start|>": 151652
53
+ }
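
added_tokens.json extends the base vocabulary with science-domain markers (<SMILES>, <protein>, <dna>, <rna>) plus the thinking, tool, and vision tags used by the chat template. A minimal sketch of sanity-checking that mapping, assuming the folder is loaded with its bundled tokenization code; "path/to/this-repo" is a placeholder for wherever this upload lives:

```python
# Sketch only, not part of this commit. trust_remote_code pulls in the
# bundled tokenization_interns1.py shipped alongside this file.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)

# Expected IDs are taken directly from added_tokens.json above.
for token, expected_id in [("<SMILES>", 151686), ("<protein>", 151688), ("<think>", 151667)]:
    assert tokenizer.convert_tokens_to_ids(token) == expected_id, token
```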
chat_template.jinja ADDED
@@ -0,0 +1,110 @@
1
+ {%- set image_count = namespace(value=0) %}
2
+ {%- set video_count = namespace(value=0) %}
3
+ {%- macro render_content(content, do_vision_count) %}
4
+ {%- if content is string %}
5
+ {{- content }}
6
+ {%- else %}
7
+ {%- for item in content %}
8
+ {%- if 'image' in item or 'image_url' in item or item.type == 'image' %}
9
+ {%- if do_vision_count %}
10
+ {%- set image_count.value = image_count.value + 1 %}
11
+ {%- endif %}
12
+ {{- 'Picture ' + image_count.value|string + ': <|vision_start|><|image_pad|><|vision_end|>'-}}
13
+ {%- elif 'video' in item or item.type == 'video' %}
14
+ {%- if do_vision_count %}
15
+ {%- set video_count.value = video_count.value + 1 %}
16
+ {%- endif %}
17
+ {{- 'Video ' + video_count.value|string + ': <|vision_start|><|video_pad|><|vision_end|>'-}}
18
+ {%- elif 'text' in item %}
19
+ {{- item.text }}
20
+ {%- endif %}
21
+ {%- endfor %}
22
+ {%- endif %}
23
+ {%- endmacro %}
24
+ {%- if tools %}
25
+ {{- '<|im_start|>system\n' }}
26
+ {%- if messages[0].role == 'system' %}
27
+ {{- render_content(messages[0].content, false) + '\n\n' }}
28
+ {%- endif %}
29
+ {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
30
+ {%- for tool in tools %}
31
+ {{- "\n" }}
32
+ {{- tool | tojson }}
33
+ {%- endfor %}
34
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
35
+ {%- else %}
36
+ {%- if messages[0].role == 'system' %}
37
+ {{- '<|im_start|>system\n' + render_content(messages[0].content, false) + '<|im_end|>\n' }}
38
+ {%- endif %}
39
+ {%- endif %}
40
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
41
+ {%- for message in messages[::-1] %}
42
+ {%- set index = (messages|length - 1) - loop.index0 %}
43
+ {%- if ns.multi_step_tool and message.role == "user" %}
44
+ {%- set content = render_content(message.content, false) %}
45
+ {%- if not(content.startswith('<tool_response>') and content.endswith('</tool_response>')) %}
46
+ {%- set ns.multi_step_tool = false %}
47
+ {%- set ns.last_query_index = index %}
48
+ {%- endif %}
49
+ {%- endif %}
50
+ {%- endfor %}
51
+ {%- for message in messages %}
52
+ {%- set content = render_content(message.content, True) %}
53
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
54
+ {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>' + '\n' }}
55
+ {%- elif message.role == "assistant" %}
56
+ {%- set reasoning_content = '' %}
57
+ {%- if message.reasoning_content is string %}
58
+ {%- set reasoning_content = message.reasoning_content %}
59
+ {%- else %}
60
+ {%- if '</think>' in content %}
61
+ {%- set reasoning_content = content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
62
+ {%- set content = content.split('</think>')[-1].lstrip('\n') %}
63
+ {%- endif %}
64
+ {%- endif %}
65
+ {%- if loop.index0 > ns.last_query_index %}
66
+ {%- if loop.last or (not loop.last and reasoning_content) %}
67
+ {{- '<|im_start|>' + message.role + '\n<think>' + reasoning_content.strip('\n') + '</think>\n\n' + content.lstrip('\n') }}
68
+ {%- else %}
69
+ {{- '<|im_start|>' + message.role + '\n' + content }}
70
+ {%- endif %}
71
+ {%- else %}
72
+ {{- '<|im_start|>' + message.role + '\n' + content }}
73
+ {%- endif %}
74
+ {%- if message.tool_calls %}
75
+ {%- for tool_call in message.tool_calls %}
76
+ {%- if (loop.first and content) or (not loop.first) %}
77
+ {{- '\n' }}
78
+ {%- endif %}
79
+ {%- if tool_call.function %}
80
+ {%- set tool_call = tool_call.function %}
81
+ {%- endif %}
82
+ {{- '<tool_call>\n{"name": "' }}
83
+ {{- tool_call.name }}
84
+ {{- '", "arguments": ' }}
85
+ {%- if tool_call.arguments is string %}
86
+ {{- tool_call.arguments }}
87
+ {%- else %}
88
+ {{- tool_call.arguments | tojson }}
89
+ {%- endif %}
90
+ {{- '}\n</tool_call>' }}
91
+ {%- endfor %}
92
+ {%- endif %}
93
+ {{- '<|im_end|>\n' }}
94
+ {%- elif message.role == "tool" %}
95
+ {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
96
+ {{- '<|im_start|>user' }}
97
+ {%- endif %}
98
+ {{- '\n<tool_response>\n' }}
99
+ {{- content }}
100
+ {{- '\n</tool_response>' }}
101
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
102
+ {{- '<|im_end|>\n' }}
103
+ {%- endif %}
104
+ {%- endif %}
105
+ {%- endfor %}
106
+ {%- if add_generation_prompt %}
107
+ {{- '<|im_start|>assistant\n' }}
108
+ {%- if enable_thinking is defined and not enable_thinking %}{{- '<think></think>\n\n'-}}{% endif %}
109
+ {%- if enable_thinking is not defined or enable_thinking %}{{- '<think>'-}}{% endif %}
110
+ {%- endif %}
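
The template wraps each message in <|im_start|>/<|im_end|> blocks, numbers image and video placeholders, folds tool responses into user turns, and prepends <think> to the generation prompt unless enable_thinking is passed as false. A minimal sketch of rendering it through apply_chat_template, with the same placeholder repo path as above:

```python
# Sketch only, not part of this commit; the repo path is a placeholder.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this-repo", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the boiling point of water at sea level?"},
]

# Extra kwargs such as enable_thinking are forwarded to the Jinja template,
# which decides whether to emit the <think> prefix after the assistant header.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, enable_thinking=True
)
print(prompt)
```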
config.json ADDED
@@ -0,0 +1,701 @@
1
+ {
2
+ "architectures": [
3
+ "InternS1ProForConditionalGeneration"
4
+ ],
5
+ "image_token_id": 151655,
6
+ "model_type": "interns1_pro",
7
+ "text_config": {
8
+ "attention_bias": false,
9
+ "attention_dropout": 0.0,
10
+ "bos_token_id": 151643,
11
+ "decoder_sparse_step": 1,
12
+ "dtype": "bfloat16",
13
+ "eos_token_id": 151645,
14
+ "head_dim": 128,
15
+ "hidden_act": "silu",
16
+ "hidden_size": 4096,
17
+ "initializer_range": 0.02,
18
+ "intermediate_size": 12288,
19
+ "max_position_embeddings": 262144,
20
+ "mlp_only_layers": [],
21
+ "model_type": "interns1_pro_text",
22
+ "moe_intermediate_size": 1536,
23
+ "norm_topk_prob": true,
24
+ "num_attention_heads": 64,
25
+ "num_experts": 512,
26
+ "num_experts_per_tok": 8,
27
+ "num_hidden_layers": 94,
28
+ "num_key_value_heads": 4,
29
+ "rms_norm_eps": 1e-06,
30
+ "rope_scaling": {
31
+ "rope_type": "default",
32
+ "fope_init_factor": 0.5,
33
+ "fope_sep_head": true,
34
+ "num_inv_freq": null
35
+ },
36
+ "rope_theta": 5000000,
37
+ "router_n_groups": 8,
38
+ "use_cache": true,
39
+ "vocab_size": 155008
40
+ },
41
+ "tie_word_embeddings": false,
42
+ "transformers_version": "4.57.0.dev0",
43
+ "video_token_id": 151656,
44
+ "vision_config": {
45
+ "depth": 24,
46
+ "hidden_act": "gelu_pytorch_tanh",
47
+ "hidden_size": 1024,
48
+ "in_channels": 3,
49
+ "initializer_range": 0.02,
50
+ "intermediate_size": 4096,
51
+ "model_type": "interns1_pro_vision",
52
+ "num_heads": 16,
53
+ "num_position_embeddings": 2304,
54
+ "out_hidden_size": 4096,
55
+ "patch_size": 16,
56
+ "spatial_merge_size": 2,
57
+ "temporal_patch_size": 2
58
+ },
59
+ "vision_end_token_id": 151653,
60
+ "vision_start_token_id": 151652,
61
+ "auto_map": {
62
+ "AutoConfig": "configuration_interns1_pro.InternS1ProConfig",
63
+ "AutoModel": "modeling_interns1_pro.InternS1ProModel",
64
+ "AutoModelForCausalLM": "modeling_interns1_pro.InternS1ProForConditionalGeneration"
65
+ },
66
+ "quantization_config": {
67
+ "activation_scheme": "dynamic",
68
+ "fmt": "e4m3",
69
+ "quant_method": "fp8",
70
+ "scale_fmt": "ue8m0",
71
+ "weight_block_size": [
72
+ 128,
73
+ 128
74
+ ],
75
+ "modules_to_not_convert": [
76
+ "model.language_model.layers.23.self_attn.k_norm",
77
+ "model.language_model.layers.30.post_attention_layernorm",
78
+ "model.language_model.layers.31.mlp.gate",
79
+ "model.language_model.layers.7.self_attn.q_norm",
80
+ "model.language_model.layers.42.self_attn.q_norm",
81
+ "model.language_model.layers.71.self_attn.k_norm",
82
+ "model.language_model.layers.5.post_attention_layernorm",
83
+ "model.language_model.layers.24.post_attention_layernorm",
84
+ "model.visual.blocks.19.norm1",
85
+ "model.language_model.layers.23.mlp.gate",
86
+ "model.language_model.layers.78.post_attention_layernorm",
87
+ "model.visual.blocks.4.attn.proj",
88
+ "model.language_model.layers.58.self_attn.k_norm",
89
+ "model.visual.blocks.4.attn.qkv",
90
+ "model.language_model.layers.27.post_attention_layernorm",
91
+ "model.language_model.layers.92.mlp.gate",
92
+ "model.language_model.layers.30.self_attn.k_norm",
93
+ "model.visual.blocks.20.mlp.linear_fc2",
94
+ "model.language_model.layers.25.input_layernorm",
95
+ "model.language_model.layers.12.self_attn.q_norm",
96
+ "model.visual.blocks.0.attn.proj",
97
+ "model.language_model.layers.56.self_attn.k_norm",
98
+ "model.language_model.layers.69.mlp.gate",
99
+ "model.language_model.layers.82.mlp.gate",
100
+ "model.visual.blocks.11.norm1",
101
+ "model.visual.blocks.17.mlp.linear_fc2",
102
+ "model.language_model.layers.51.post_attention_layernorm",
103
+ "model.visual.blocks.16.norm1",
104
+ "model.language_model.layers.93.post_attention_layernorm",
105
+ "model.language_model.layers.36.mlp.gate",
106
+ "model.visual.blocks.15.mlp.linear_fc2",
107
+ "model.language_model.layers.78.self_attn.q_norm",
108
+ "model.language_model.layers.6.input_layernorm",
109
+ "model.language_model.layers.71.input_layernorm",
110
+ "model.language_model.layers.74.input_layernorm",
111
+ "model.language_model.layers.77.mlp.gate",
112
+ "model.visual.blocks.13.attn.proj",
113
+ "model.language_model.layers.52.post_attention_layernorm",
114
+ "model.language_model.layers.56.post_attention_layernorm",
115
+ "model.language_model.layers.39.self_attn.q_norm",
116
+ "model.language_model.layers.37.input_layernorm",
117
+ "model.visual.blocks.13.attn.qkv",
118
+ "model.visual.blocks.16.norm2",
119
+ "model.visual.blocks.16.attn.proj",
120
+ "model.language_model.layers.59.post_attention_layernorm",
121
+ "model.language_model.layers.0.mlp.gate",
122
+ "model.language_model.layers.11.mlp.gate",
123
+ "model.language_model.layers.37.post_attention_layernorm",
124
+ "model.language_model.layers.29.mlp.gate",
125
+ "model.visual.blocks.14.mlp.linear_fc1",
126
+ "model.language_model.layers.37.mlp.gate",
127
+ "model.language_model.layers.84.mlp.gate",
128
+ "model.language_model.layers.43.mlp.gate",
129
+ "model.language_model.layers.56.mlp.gate",
130
+ "model.language_model.layers.12.post_attention_layernorm",
131
+ "model.language_model.layers.8.post_attention_layernorm",
132
+ "model.language_model.layers.83.mlp.gate",
133
+ "model.visual.blocks.6.norm2",
134
+ "model.language_model.layers.35.post_attention_layernorm",
135
+ "model.language_model.layers.46.self_attn.q_norm",
136
+ "model.visual.blocks.9.norm1",
137
+ "model.language_model.layers.14.input_layernorm",
138
+ "model.language_model.layers.61.mlp.gate",
139
+ "model.visual.blocks.23.norm2",
140
+ "model.language_model.layers.41.mlp.gate",
141
+ "model.visual.blocks.17.mlp.linear_fc1",
142
+ "model.visual.blocks.4.norm2",
143
+ "model.visual.blocks.17.attn.qkv",
144
+ "model.language_model.layers.83.self_attn.k_norm",
145
+ "model.language_model.layers.47.post_attention_layernorm",
146
+ "model.language_model.layers.59.input_layernorm",
147
+ "model.language_model.layers.36.post_attention_layernorm",
148
+ "model.language_model.layers.46.input_layernorm",
149
+ "model.language_model.layers.46.post_attention_layernorm",
150
+ "model.language_model.layers.83.input_layernorm",
151
+ "model.language_model.layers.48.self_attn.k_norm",
152
+ "model.visual.blocks.12.norm2",
153
+ "model.visual.blocks.21.mlp.linear_fc1",
154
+ "model.language_model.layers.34.mlp.gate",
155
+ "model.language_model.layers.10.self_attn.k_norm",
156
+ "model.language_model.layers.12.mlp.gate",
157
+ "model.language_model.layers.42.self_attn.k_norm",
158
+ "model.language_model.layers.40.mlp.gate",
159
+ "model.visual.blocks.8.norm2",
160
+ "model.language_model.layers.90.post_attention_layernorm",
161
+ "model.visual.blocks.23.norm1",
162
+ "model.language_model.layers.20.self_attn.q_norm",
163
+ "model.visual.blocks.11.attn.proj",
164
+ "model.visual.blocks.19.attn.qkv",
165
+ "model.language_model.layers.49.input_layernorm",
166
+ "model.language_model.layers.49.self_attn.q_norm",
167
+ "model.visual.blocks.12.norm1",
168
+ "model.language_model.layers.76.post_attention_layernorm",
169
+ "model.language_model.layers.17.self_attn.k_norm",
170
+ "model.language_model.layers.57.self_attn.k_norm",
171
+ "model.visual.blocks.9.attn.qkv",
172
+ "model.visual.blocks.16.attn.qkv",
173
+ "model.language_model.layers.88.self_attn.k_norm",
174
+ "model.language_model.layers.62.post_attention_layernorm",
175
+ "model.language_model.layers.87.self_attn.k_norm",
176
+ "model.language_model.layers.4.input_layernorm",
177
+ "model.visual.blocks.22.attn.proj",
178
+ "model.language_model.layers.73.mlp.gate",
179
+ "model.language_model.layers.1.mlp.gate",
180
+ "model.visual.blocks.6.mlp.linear_fc1",
181
+ "model.language_model.layers.25.self_attn.k_norm",
182
+ "model.language_model.layers.22.self_attn.q_norm",
183
+ "model.language_model.layers.32.input_layernorm",
184
+ "model.visual.blocks.10.norm1",
185
+ "model.visual.blocks.2.mlp.linear_fc1",
186
+ "model.language_model.layers.52.self_attn.q_norm",
187
+ "model.language_model.layers.88.mlp.gate",
188
+ "model.visual.blocks.8.attn.qkv",
189
+ "model.language_model.layers.27.self_attn.q_norm",
190
+ "model.language_model.layers.1.input_layernorm",
191
+ "model.language_model.layers.41.self_attn.k_norm",
192
+ "model.language_model.layers.87.self_attn.q_norm",
193
+ "model.language_model.layers.55.post_attention_layernorm",
194
+ "model.visual.blocks.13.mlp.linear_fc2",
195
+ "model.visual.blocks.18.norm2",
196
+ "model.language_model.layers.54.post_attention_layernorm",
197
+ "model.visual.blocks.20.mlp.linear_fc1",
198
+ "model.language_model.layers.66.mlp.gate",
199
+ "model.language_model.layers.58.mlp.gate",
200
+ "model.language_model.layers.31.self_attn.k_norm",
201
+ "model.visual.blocks.22.attn.qkv",
202
+ "model.language_model.layers.38.input_layernorm",
203
+ "model.language_model.layers.93.mlp.gate",
204
+ "model.language_model.layers.20.self_attn.k_norm",
205
+ "model.language_model.layers.75.self_attn.q_norm",
206
+ "model.language_model.layers.75.input_layernorm",
207
+ "model.visual.blocks.14.norm1",
208
+ "model.language_model.layers.44.self_attn.k_norm",
209
+ "model.visual.blocks.7.attn.proj",
210
+ "model.language_model.layers.1.self_attn.k_norm",
211
+ "model.language_model.layers.53.mlp.gate",
212
+ "model.language_model.layers.81.post_attention_layernorm",
213
+ "model.language_model.layers.53.post_attention_layernorm",
214
+ "model.language_model.layers.93.self_attn.q_norm",
215
+ "model.language_model.layers.6.post_attention_layernorm",
216
+ "model.language_model.layers.77.input_layernorm",
217
+ "model.visual.blocks.15.attn.qkv",
218
+ "model.language_model.layers.28.input_layernorm",
219
+ "model.visual.blocks.2.attn.proj",
220
+ "model.visual.blocks.11.mlp.linear_fc1",
221
+ "model.language_model.layers.57.post_attention_layernorm",
222
+ "model.visual.blocks.18.mlp.linear_fc2",
223
+ "model.visual.blocks.0.norm1",
224
+ "model.language_model.layers.3.self_attn.k_norm",
225
+ "model.visual.blocks.4.mlp.linear_fc1",
226
+ "model.language_model.layers.1.post_attention_layernorm",
227
+ "model.language_model.layers.82.post_attention_layernorm",
228
+ "model.language_model.layers.77.self_attn.k_norm",
229
+ "model.language_model.layers.5.self_attn.k_norm",
230
+ "model.language_model.layers.17.self_attn.q_norm",
231
+ "model.language_model.layers.63.self_attn.q_norm",
232
+ "model.language_model.layers.30.input_layernorm",
233
+ "model.language_model.layers.58.self_attn.q_norm",
234
+ "model.language_model.layers.68.input_layernorm",
235
+ "model.language_model.layers.12.input_layernorm",
236
+ "model.language_model.layers.72.self_attn.k_norm",
237
+ "model.language_model.layers.47.self_attn.q_norm",
238
+ "model.language_model.layers.9.mlp.gate",
239
+ "model.language_model.layers.57.input_layernorm",
240
+ "model.language_model.layers.27.input_layernorm",
241
+ "model.language_model.layers.43.self_attn.k_norm",
242
+ "model.language_model.layers.50.self_attn.k_norm",
243
+ "model.language_model.layers.76.self_attn.q_norm",
244
+ "model.visual.blocks.13.norm1",
245
+ "model.language_model.layers.54.self_attn.k_norm",
246
+ "model.visual.blocks.0.attn.qkv",
247
+ "model.visual.blocks.9.norm2",
248
+ "model.visual.blocks.15.norm1",
249
+ "model.visual.blocks.5.attn.proj",
250
+ "model.language_model.layers.39.mlp.gate",
251
+ "model.visual.blocks.0.mlp.linear_fc1",
252
+ "model.language_model.layers.20.post_attention_layernorm",
253
+ "model.language_model.layers.75.post_attention_layernorm",
254
+ "model.visual.blocks.6.attn.qkv",
255
+ "model.visual.blocks.8.mlp.linear_fc1",
256
+ "model.visual.blocks.20.attn.proj",
257
+ "model.visual.blocks.23.attn.qkv",
258
+ "model.language_model.layers.39.self_attn.k_norm",
259
+ "model.language_model.layers.27.self_attn.k_norm",
260
+ "model.language_model.layers.21.self_attn.k_norm",
261
+ "model.visual.blocks.19.norm2",
262
+ "model.visual.blocks.17.norm2",
263
+ "model.language_model.layers.10.self_attn.q_norm",
264
+ "model.language_model.layers.6.self_attn.k_norm",
265
+ "model.language_model.layers.37.self_attn.k_norm",
266
+ "model.language_model.layers.18.mlp.gate",
267
+ "model.language_model.layers.60.self_attn.k_norm",
268
+ "model.language_model.layers.85.self_attn.k_norm",
269
+ "model.language_model.layers.67.self_attn.k_norm",
270
+ "model.visual.blocks.0.mlp.linear_fc2",
271
+ "model.language_model.layers.76.input_layernorm",
272
+ "model.visual.blocks.23.attn.proj",
273
+ "model.language_model.layers.32.post_attention_layernorm",
274
+ "model.language_model.layers.85.post_attention_layernorm",
275
+ "model.language_model.layers.82.input_layernorm",
276
+ "model.visual.blocks.8.norm1",
277
+ "model.visual.blocks.15.mlp.linear_fc1",
278
+ "model.language_model.layers.22.post_attention_layernorm",
279
+ "model.language_model.layers.68.post_attention_layernorm",
280
+ "model.language_model.layers.90.self_attn.k_norm",
281
+ "lm_head",
282
+ "model.language_model.layers.56.input_layernorm",
283
+ "model.language_model.layers.74.self_attn.k_norm",
284
+ "model.language_model.layers.42.post_attention_layernorm",
285
+ "model.visual.blocks.20.norm2",
286
+ "model.language_model.layers.51.self_attn.k_norm",
287
+ "model.language_model.layers.70.post_attention_layernorm",
288
+ "model.language_model.layers.20.input_layernorm",
289
+ "model.language_model.layers.9.self_attn.k_norm",
290
+ "model.language_model.layers.89.self_attn.q_norm",
291
+ "model.language_model.layers.91.self_attn.k_norm",
292
+ "model.visual.blocks.17.norm1",
293
+ "model.language_model.layers.41.self_attn.q_norm",
294
+ "model.language_model.layers.32.self_attn.q_norm",
295
+ "model.language_model.layers.80.post_attention_layernorm",
296
+ "model.language_model.layers.85.input_layernorm",
297
+ "model.visual.blocks.13.mlp.linear_fc1",
298
+ "model.visual.blocks.3.mlp.linear_fc2",
299
+ "model.language_model.norm",
300
+ "model.language_model.layers.22.input_layernorm",
301
+ "model.language_model.layers.26.input_layernorm",
302
+ "model.visual.blocks.23.mlp.linear_fc1",
303
+ "model.language_model.layers.9.self_attn.q_norm",
304
+ "model.language_model.layers.66.input_layernorm",
305
+ "model.language_model.layers.52.input_layernorm",
306
+ "model.language_model.layers.30.self_attn.q_norm",
307
+ "model.language_model.layers.83.self_attn.q_norm",
308
+ "model.language_model.layers.55.input_layernorm",
309
+ "model.language_model.layers.41.post_attention_layernorm",
310
+ "model.language_model.layers.88.input_layernorm",
311
+ "model.language_model.layers.93.self_attn.k_norm",
312
+ "model.visual.blocks.19.mlp.linear_fc1",
313
+ "model.visual.blocks.21.norm2",
314
+ "model.language_model.layers.33.mlp.gate",
315
+ "model.language_model.layers.41.input_layernorm",
316
+ "model.language_model.layers.77.post_attention_layernorm",
317
+ "model.language_model.layers.2.post_attention_layernorm",
318
+ "model.language_model.layers.45.input_layernorm",
319
+ "model.visual.blocks.7.mlp.linear_fc1",
320
+ "model.language_model.layers.40.self_attn.k_norm",
321
+ "model.language_model.layers.35.self_attn.q_norm",
322
+ "model.language_model.layers.72.post_attention_layernorm",
323
+ "model.language_model.layers.13.post_attention_layernorm",
324
+ "model.language_model.layers.81.input_layernorm",
325
+ "model.language_model.layers.3.mlp.gate",
326
+ "model.visual.blocks.5.norm2",
327
+ "model.language_model.layers.62.self_attn.q_norm",
328
+ "model.language_model.layers.91.input_layernorm",
329
+ "model.language_model.layers.81.self_attn.k_norm",
330
+ "model.language_model.layers.64.mlp.gate",
331
+ "model.language_model.layers.81.mlp.gate",
332
+ "model.language_model.layers.5.input_layernorm",
333
+ "model.language_model.layers.75.mlp.gate",
334
+ "model.language_model.layers.88.self_attn.q_norm",
335
+ "model.language_model.layers.89.mlp.gate",
336
+ "model.language_model.layers.59.mlp.gate",
337
+ "model.language_model.layers.16.self_attn.k_norm",
338
+ "model.language_model.layers.49.mlp.gate",
339
+ "model.language_model.layers.59.self_attn.q_norm",
340
+ "model.language_model.layers.21.mlp.gate",
341
+ "model.language_model.layers.79.post_attention_layernorm",
342
+ "model.visual.blocks.21.mlp.linear_fc2",
343
+ "model.language_model.layers.34.self_attn.q_norm",
344
+ "model.language_model.layers.34.input_layernorm",
345
+ "model.language_model.layers.3.post_attention_layernorm",
346
+ "model.language_model.rotary_emb",
347
+ "model.language_model.layers.93.input_layernorm",
348
+ "model.language_model.layers.17.mlp.gate",
349
+ "model.visual.blocks.22.mlp.linear_fc1",
350
+ "model.visual.blocks.12.attn.proj",
351
+ "model.language_model.layers.34.post_attention_layernorm",
352
+ "model.language_model.layers.48.post_attention_layernorm",
353
+ "model.visual.blocks.15.attn.proj",
354
+ "model.language_model.layers.90.mlp.gate",
355
+ "model.visual.blocks.12.attn.qkv",
356
+ "model.language_model.layers.56.self_attn.q_norm",
357
+ "model.language_model.layers.40.input_layernorm",
358
+ "model.language_model.layers.48.input_layernorm",
359
+ "model.language_model.layers.29.self_attn.k_norm",
360
+ "model.language_model.layers.40.self_attn.q_norm",
361
+ "model.language_model.layers.45.self_attn.q_norm",
362
+ "model.visual.blocks.21.attn.qkv",
363
+ "model.language_model.layers.83.post_attention_layernorm",
364
+ "model.language_model.layers.8.self_attn.q_norm",
365
+ "model.language_model.layers.61.input_layernorm",
366
+ "model.visual.blocks.16.mlp.linear_fc1",
367
+ "model.language_model.layers.82.self_attn.q_norm",
368
+ "model.language_model.layers.35.self_attn.k_norm",
369
+ "model.visual.blocks.21.norm1",
370
+ "model.visual.blocks.10.attn.proj",
371
+ "model.language_model.layers.11.self_attn.q_norm",
372
+ "model.language_model.layers.74.mlp.gate",
373
+ "model.language_model.layers.85.mlp.gate",
374
+ "model.language_model.layers.69.input_layernorm",
375
+ "model.language_model.layers.38.mlp.gate",
376
+ "model.visual.blocks.6.mlp.linear_fc2",
377
+ "model.language_model.layers.44.input_layernorm",
378
+ "model.language_model.layers.66.self_attn.q_norm",
379
+ "model.language_model.layers.3.self_attn.q_norm",
380
+ "model.language_model.layers.64.self_attn.k_norm",
381
+ "model.visual.blocks.14.attn.qkv",
382
+ "model.language_model.layers.8.input_layernorm",
383
+ "model.language_model.layers.36.self_attn.q_norm",
384
+ "model.language_model.layers.6.self_attn.q_norm",
385
+ "model.language_model.layers.36.input_layernorm",
386
+ "model.language_model.layers.55.mlp.gate",
387
+ "model.language_model.layers.50.mlp.gate",
388
+ "model.language_model.layers.18.input_layernorm",
389
+ "model.language_model.layers.16.self_attn.q_norm",
390
+ "model.language_model.layers.11.self_attn.k_norm",
391
+ "model.visual.blocks.3.attn.proj",
392
+ "model.language_model.layers.49.post_attention_layernorm",
393
+ "model.language_model.layers.19.input_layernorm",
394
+ "model.language_model.layers.73.input_layernorm",
395
+ "model.visual.blocks.22.norm1",
396
+ "model.language_model.layers.66.self_attn.k_norm",
397
+ "model.language_model.layers.14.self_attn.k_norm",
398
+ "model.visual.merger.linear_fc2",
399
+ "model.language_model.layers.48.self_attn.q_norm",
400
+ "model.visual.blocks.18.norm1",
401
+ "model.language_model.layers.0.post_attention_layernorm",
402
+ "model.language_model.layers.79.input_layernorm",
403
+ "model.language_model.layers.81.self_attn.q_norm",
404
+ "model.language_model.layers.47.input_layernorm",
405
+ "model.language_model.layers.14.mlp.gate",
406
+ "model.language_model.layers.35.mlp.gate",
407
+ "model.language_model.layers.51.input_layernorm",
408
+ "model.language_model.layers.16.input_layernorm",
409
+ "model.language_model.layers.68.self_attn.q_norm",
410
+ "model.language_model.layers.84.self_attn.q_norm",
411
+ "model.visual.blocks.14.mlp.linear_fc2",
412
+ "model.language_model.layers.14.post_attention_layernorm",
413
+ "model.language_model.layers.42.mlp.gate",
414
+ "model.language_model.layers.74.post_attention_layernorm",
415
+ "model.language_model.layers.21.self_attn.q_norm",
416
+ "model.language_model.layers.7.self_attn.k_norm",
417
+ "model.language_model.layers.18.self_attn.k_norm",
418
+ "model.language_model.layers.58.input_layernorm",
419
+ "model.language_model.layers.18.self_attn.q_norm",
420
+ "model.language_model.layers.80.self_attn.k_norm",
421
+ "model.visual.blocks.9.mlp.linear_fc2",
422
+ "model.language_model.layers.33.self_attn.k_norm",
423
+ "model.language_model.layers.28.self_attn.k_norm",
424
+ "model.visual.blocks.12.mlp.linear_fc2",
425
+ "model.language_model.layers.72.self_attn.q_norm",
426
+ "model.language_model.layers.36.self_attn.k_norm",
427
+ "model.visual.blocks.7.norm1",
428
+ "model.language_model.layers.26.self_attn.q_norm",
429
+ "model.visual.merger.linear_fc1",
430
+ "model.visual.blocks.6.attn.proj",
431
+ "model.visual.blocks.4.norm1",
432
+ "model.language_model.layers.69.self_attn.q_norm",
433
+ "model.language_model.layers.39.post_attention_layernorm",
434
+ "model.visual.blocks.10.attn.qkv",
435
+ "model.visual.blocks.12.mlp.linear_fc1",
436
+ "model.language_model.layers.29.self_attn.q_norm",
437
+ "model.language_model.layers.37.self_attn.q_norm",
438
+ "model.visual.blocks.3.norm1",
439
+ "model.language_model.layers.5.self_attn.q_norm",
440
+ "model.language_model.layers.34.self_attn.k_norm",
441
+ "model.visual.blocks.5.attn.qkv",
442
+ "model.language_model.layers.80.input_layernorm",
443
+ "model.language_model.layers.2.mlp.gate",
444
+ "model.visual.blocks.10.mlp.linear_fc1",
445
+ "model.language_model.layers.19.post_attention_layernorm",
446
+ "model.language_model.layers.78.self_attn.k_norm",
447
+ "model.language_model.layers.57.self_attn.q_norm",
448
+ "model.language_model.layers.39.input_layernorm",
449
+ "model.language_model.layers.16.post_attention_layernorm",
450
+ "model.language_model.layers.67.input_layernorm",
451
+ "model.visual.blocks.13.norm2",
452
+ "model.language_model.layers.0.self_attn.k_norm",
453
+ "model.language_model.layers.3.input_layernorm",
454
+ "model.language_model.layers.84.post_attention_layernorm",
455
+ "model.language_model.layers.6.mlp.gate",
456
+ "model.language_model.layers.17.input_layernorm",
457
+ "model.visual.merger.norm",
458
+ "model.visual.blocks.19.attn.proj",
459
+ "model.language_model.layers.45.mlp.gate",
460
+ "model.language_model.layers.70.mlp.gate",
461
+ "model.language_model.layers.1.self_attn.q_norm",
462
+ "model.language_model.layers.25.mlp.gate",
463
+ "model.language_model.layers.47.mlp.gate",
464
+ "model.language_model.layers.30.mlp.gate",
465
+ "model.visual.blocks.22.mlp.linear_fc2",
466
+ "model.language_model.layers.23.input_layernorm",
467
+ "model.language_model.layers.92.input_layernorm",
468
+ "model.visual.blocks.14.attn.proj",
469
+ "model.language_model.layers.72.mlp.gate",
470
+ "model.language_model.layers.78.mlp.gate",
471
+ "model.language_model.layers.17.post_attention_layernorm",
472
+ "model.language_model.layers.86.input_layernorm",
473
+ "model.language_model.layers.24.self_attn.k_norm",
474
+ "model.language_model.layers.86.self_attn.q_norm",
475
+ "model.language_model.layers.86.mlp.gate",
476
+ "model.language_model.layers.8.self_attn.k_norm",
477
+ "model.language_model.layers.15.self_attn.q_norm",
478
+ "model.language_model.layers.21.input_layernorm",
479
+ "model.language_model.layers.43.post_attention_layernorm",
480
+ "model.language_model.layers.72.input_layernorm",
481
+ "model.language_model.layers.68.mlp.gate",
482
+ "model.visual.blocks.21.attn.proj",
483
+ "model.language_model.layers.8.mlp.gate",
484
+ "model.language_model.layers.22.self_attn.k_norm",
485
+ "model.language_model.layers.92.post_attention_layernorm",
486
+ "model.visual.blocks.2.norm2",
487
+ "model.language_model.layers.27.mlp.gate",
488
+ "model.language_model.layers.26.mlp.gate",
489
+ "model.language_model.layers.50.post_attention_layernorm",
490
+ "model.language_model.layers.73.self_attn.q_norm",
491
+ "model.language_model.layers.2.self_attn.k_norm",
492
+ "model.language_model.layers.55.self_attn.k_norm",
493
+ "model.visual.blocks.23.mlp.linear_fc2",
494
+ "model.language_model.layers.31.self_attn.q_norm",
495
+ "model.visual.blocks.9.mlp.linear_fc1",
496
+ "model.language_model.layers.65.post_attention_layernorm",
497
+ "model.language_model.layers.44.post_attention_layernorm",
498
+ "model.language_model.layers.44.mlp.gate",
499
+ "model.language_model.layers.59.self_attn.k_norm",
500
+ "model.language_model.layers.4.self_attn.q_norm",
501
+ "model.language_model.layers.7.post_attention_layernorm",
502
+ "model.language_model.layers.67.post_attention_layernorm",
503
+ "model.language_model.layers.11.post_attention_layernorm",
504
+ "model.language_model.layers.90.input_layernorm",
505
+ "model.visual.blocks.17.attn.proj",
506
+ "model.language_model.layers.61.self_attn.k_norm",
507
+ "model.visual.blocks.18.attn.proj",
508
+ "model.visual.blocks.3.attn.qkv",
509
+ "model.language_model.layers.10.input_layernorm",
510
+ "model.visual.blocks.5.mlp.linear_fc1",
511
+ "model.language_model.layers.33.post_attention_layernorm",
512
+ "model.language_model.layers.24.self_attn.q_norm",
513
+ "model.language_model.layers.31.post_attention_layernorm",
514
+ "model.visual.blocks.6.norm1",
515
+ "model.visual.blocks.15.norm2",
516
+ "model.language_model.layers.57.mlp.gate",
517
+ "model.language_model.layers.91.post_attention_layernorm",
518
+ "model.language_model.layers.51.mlp.gate",
519
+ "model.language_model.layers.84.self_attn.k_norm",
520
+ "model.language_model.layers.89.input_layernorm",
521
+ "model.visual.blocks.16.mlp.linear_fc2",
522
+ "model.language_model.layers.38.post_attention_layernorm",
523
+ "model.visual.blocks.1.mlp.linear_fc2",
524
+ "model.language_model.layers.76.mlp.gate",
525
+ "model.language_model.layers.24.mlp.gate",
526
+ "model.language_model.layers.45.self_attn.k_norm",
527
+ "model.language_model.layers.87.input_layernorm",
528
+ "model.language_model.layers.38.self_attn.q_norm",
529
+ "model.language_model.layers.63.input_layernorm",
530
+ "model.language_model.layers.4.mlp.gate",
531
+ "model.visual.blocks.2.mlp.linear_fc2",
532
+ "model.language_model.layers.91.self_attn.q_norm",
533
+ "model.language_model.layers.28.post_attention_layernorm",
534
+ "model.language_model.layers.7.input_layernorm",
535
+ "model.language_model.layers.63.mlp.gate",
536
+ "model.language_model.layers.18.post_attention_layernorm",
537
+ "model.language_model.layers.4.post_attention_layernorm",
538
+ "model.language_model.layers.26.self_attn.k_norm",
539
+ "model.language_model.layers.45.post_attention_layernorm",
540
+ "model.language_model.layers.69.self_attn.k_norm",
541
+ "model.language_model.layers.49.self_attn.k_norm",
542
+ "model.language_model.layers.66.post_attention_layernorm",
543
+ "model.visual.blocks.1.attn.qkv",
544
+ "model.language_model.layers.31.input_layernorm",
545
+ "model.visual.blocks.7.mlp.linear_fc2",
546
+ "model.language_model.layers.15.mlp.gate",
547
+ "model.visual.blocks.20.norm1",
548
+ "model.language_model.layers.52.mlp.gate",
549
+ "model.language_model.layers.58.post_attention_layernorm",
550
+ "model.language_model.layers.54.input_layernorm",
551
+ "model.language_model.layers.50.self_attn.q_norm",
552
+ "model.language_model.layers.75.self_attn.k_norm",
553
+ "model.language_model.embed_tokens",
554
+ "model.language_model.layers.73.post_attention_layernorm",
555
+ "model.language_model.layers.65.self_attn.q_norm",
556
+ "model.language_model.layers.50.input_layernorm",
557
+ "model.language_model.layers.60.post_attention_layernorm",
558
+ "model.language_model.layers.70.input_layernorm",
559
+ "model.language_model.layers.87.mlp.gate",
560
+ "model.visual.blocks.11.attn.qkv",
561
+ "model.visual.blocks.1.norm2",
562
+ "model.language_model.layers.12.self_attn.k_norm",
563
+ "model.visual.blocks.4.mlp.linear_fc2",
564
+ "model.language_model.layers.78.input_layernorm",
565
+ "model.language_model.layers.84.input_layernorm",
566
+ "model.language_model.layers.29.post_attention_layernorm",
567
+ "model.language_model.layers.32.mlp.gate",
568
+ "model.language_model.layers.62.mlp.gate",
569
+ "model.visual.blocks.11.mlp.linear_fc2",
570
+ "model.language_model.layers.29.input_layernorm",
571
+ "model.language_model.layers.13.input_layernorm",
572
+ "model.language_model.layers.92.self_attn.k_norm",
573
+ "model.language_model.layers.0.input_layernorm",
574
+ "model.language_model.layers.86.post_attention_layernorm",
575
+ "model.language_model.layers.25.self_attn.q_norm",
576
+ "model.language_model.layers.79.self_attn.k_norm",
577
+ "model.language_model.layers.23.post_attention_layernorm",
578
+ "model.language_model.layers.33.self_attn.q_norm",
579
+ "model.language_model.layers.79.self_attn.q_norm",
580
+ "model.language_model.layers.80.self_attn.q_norm",
581
+ "model.language_model.layers.67.self_attn.q_norm",
582
+ "model.language_model.layers.86.self_attn.k_norm",
583
+ "model.language_model.layers.88.post_attention_layernorm",
584
+ "model.language_model.layers.68.self_attn.k_norm",
585
+ "model.language_model.layers.53.self_attn.k_norm",
586
+ "model.visual.blocks.2.attn.qkv",
587
+ "model.language_model.layers.42.input_layernorm",
588
+ "model.language_model.layers.53.input_layernorm",
589
+ "model.visual.blocks.11.norm2",
590
+ "model.language_model.layers.64.post_attention_layernorm",
591
+ "model.visual.blocks.18.mlp.linear_fc1",
592
+ "model.language_model.layers.9.post_attention_layernorm",
593
+ "model.language_model.layers.14.self_attn.q_norm",
594
+ "model.language_model.layers.33.input_layernorm",
595
+ "model.language_model.layers.71.self_attn.q_norm",
596
+ "model.visual.pos_embed",
597
+ "model.language_model.layers.79.mlp.gate",
598
+ "model.language_model.layers.19.mlp.gate",
599
+ "model.language_model.layers.71.mlp.gate",
600
+ "model.visual.patch_embed.proj",
601
+ "model.visual.blocks.10.norm2",
602
+ "model.visual.blocks.8.mlp.linear_fc2",
603
+ "model.language_model.layers.71.post_attention_layernorm",
604
+ "model.language_model.layers.40.post_attention_layernorm",
605
+ "model.language_model.layers.24.input_layernorm",
606
+ "model.language_model.layers.67.mlp.gate",
607
+ "model.visual.blocks.14.norm2",
608
+ "model.language_model.layers.46.mlp.gate",
609
+ "model.language_model.layers.32.self_attn.k_norm",
610
+ "model.language_model.layers.10.post_attention_layernorm",
611
+ "model.language_model.layers.73.self_attn.k_norm",
612
+ "model.language_model.layers.15.post_attention_layernorm",
613
+ "model.language_model.layers.13.self_attn.k_norm",
614
+ "model.visual.blocks.9.attn.proj",
615
+ "model.visual.blocks.10.mlp.linear_fc2",
616
+ "model.visual.blocks.2.norm1",
617
+ "model.language_model.layers.20.mlp.gate",
618
+ "model.language_model.layers.22.mlp.gate",
619
+ "model.language_model.layers.70.self_attn.q_norm",
620
+ "model.language_model.layers.54.self_attn.q_norm",
621
+ "model.language_model.layers.92.self_attn.q_norm",
622
+ "model.language_model.layers.70.self_attn.k_norm",
623
+ "model.language_model.layers.47.self_attn.k_norm",
624
+ "model.language_model.layers.64.self_attn.q_norm",
625
+ "model.language_model.layers.4.self_attn.k_norm",
626
+ "model.language_model.layers.62.self_attn.k_norm",
627
+ "model.language_model.layers.65.input_layernorm",
628
+ "model.language_model.layers.25.post_attention_layernorm",
629
+ "model.language_model.layers.61.post_attention_layernorm",
630
+ "model.language_model.layers.23.self_attn.q_norm",
631
+ "model.language_model.layers.38.self_attn.k_norm",
632
+ "model.language_model.layers.80.mlp.gate",
633
+ "model.language_model.layers.54.mlp.gate",
634
+ "model.visual.blocks.7.norm2",
635
+ "model.language_model.layers.7.mlp.gate",
636
+ "model.language_model.layers.0.self_attn.q_norm",
637
+ "model.language_model.layers.48.mlp.gate",
638
+ "model.language_model.layers.43.self_attn.q_norm",
639
+ "model.visual.blocks.7.attn.qkv",
640
+ "model.visual.blocks.3.norm2",
641
+ "model.language_model.layers.5.mlp.gate",
642
+ "model.language_model.layers.61.self_attn.q_norm",
643
+ "model.language_model.layers.64.input_layernorm",
644
+ "model.visual.blocks.20.attn.qkv",
645
+ "model.language_model.layers.76.self_attn.k_norm",
646
+ "model.language_model.layers.13.mlp.gate",
647
+ "model.visual.blocks.1.norm1",
648
+ "model.language_model.layers.53.self_attn.q_norm",
649
+ "model.language_model.layers.52.self_attn.k_norm",
650
+ "model.language_model.layers.89.post_attention_layernorm",
651
+ "model.language_model.layers.90.self_attn.q_norm",
652
+ "model.language_model.layers.13.self_attn.q_norm",
653
+ "model.language_model.layers.28.self_attn.q_norm",
654
+ "model.visual.blocks.22.norm2",
655
+ "model.visual.blocks.5.mlp.linear_fc2",
656
+ "model.language_model.layers.62.input_layernorm",
657
+ "model.language_model.layers.21.post_attention_layernorm",
658
+ "model.language_model.layers.55.self_attn.q_norm",
659
+ "model.visual.blocks.0.norm2",
660
+ "model.language_model.layers.19.self_attn.q_norm",
661
+ "model.language_model.layers.19.self_attn.k_norm",
662
+ "model.language_model.layers.44.self_attn.q_norm",
663
+ "model.language_model.layers.74.self_attn.q_norm",
664
+ "model.language_model.layers.26.post_attention_layernorm",
665
+ "model.visual.blocks.1.mlp.linear_fc1",
666
+ "model.visual.blocks.3.mlp.linear_fc1",
667
+ "model.language_model.layers.63.post_attention_layernorm",
668
+ "model.language_model.layers.69.post_attention_layernorm",
669
+ "model.visual.blocks.5.norm1",
670
+ "model.visual.blocks.18.attn.qkv",
671
+ "model.language_model.layers.15.input_layernorm",
672
+ "model.language_model.layers.77.self_attn.q_norm",
673
+ "model.language_model.layers.2.input_layernorm",
674
+ "model.language_model.layers.46.self_attn.k_norm",
675
+ "model.language_model.layers.28.mlp.gate",
676
+ "model.language_model.layers.65.self_attn.k_norm",
677
+ "model.visual.blocks.1.attn.proj",
678
+ "model.language_model.layers.89.self_attn.k_norm",
679
+ "model.language_model.layers.35.input_layernorm",
680
+ "model.language_model.layers.15.self_attn.k_norm",
681
+ "model.language_model.layers.11.input_layernorm",
682
+ "model.language_model.layers.60.input_layernorm",
683
+ "model.language_model.layers.51.self_attn.q_norm",
684
+ "model.language_model.layers.43.input_layernorm",
685
+ "model.language_model.layers.87.post_attention_layernorm",
686
+ "model.language_model.layers.63.self_attn.k_norm",
687
+ "model.language_model.layers.9.input_layernorm",
688
+ "model.visual.blocks.19.mlp.linear_fc2",
689
+ "model.language_model.layers.65.mlp.gate",
690
+ "model.language_model.layers.16.mlp.gate",
691
+ "model.language_model.layers.85.self_attn.q_norm",
692
+ "model.visual.blocks.8.attn.proj",
693
+ "model.language_model.layers.2.self_attn.q_norm",
694
+ "model.language_model.layers.10.mlp.gate",
695
+ "model.language_model.layers.82.self_attn.k_norm",
696
+ "model.language_model.layers.60.mlp.gate",
697
+ "model.language_model.layers.91.mlp.gate",
698
+ "model.language_model.layers.60.self_attn.q_norm"
699
+ ]
700
+ }
701
+ }
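
config.json describes a mixture-of-experts text decoder (94 layers, 512 experts with top-8 routing, router_n_groups set to 8, 262144-token context) paired with a 24-block vision tower, and an FP8 block-quantization config (128x128 weight blocks) whose long modules_to_not_convert list names the modules kept in higher precision. A minimal sketch of loading it through the auto_map entries, again with a placeholder repo path:

```python
# Sketch only, not part of this commit; trust_remote_code resolves auto_map
# to configuration_interns1_pro.InternS1ProConfig.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/this-repo", trust_remote_code=True)

text = config.text_config
print(config.model_type)                           # interns1_pro
print(text.num_hidden_layers, text.hidden_size)    # 94, 4096
print(text.num_experts, text.num_experts_per_tok)  # 512 experts, top-8 routing
print(text.max_position_embeddings)                # 262144
```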
configuration_interns1_pro.py ADDED
@@ -0,0 +1,175 @@
1
+ # coding=utf-8
2
+ # Copyright 2025 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from transformers.configuration_utils import PretrainedConfig
17
+ from transformers.modeling_rope_utils import rope_config_validation
18
+
19
+
20
+ class InternS1ProTextConfig(PretrainedConfig):
21
+ model_type = "interns1_pro_text"
22
+ base_config_key = "text_config"
23
+ keys_to_ignore_at_inference = ["past_key_values"]
24
+ base_model_tp_plan = {
25
+ "layers.*.self_attn.q_proj": "colwise",
26
+ "layers.*.self_attn.k_proj": "colwise",
27
+ "layers.*.self_attn.v_proj": "colwise",
28
+ "layers.*.self_attn.o_proj": "rowwise",
29
+ "layers.*.mlp.experts.*.gate_proj": "colwise",
30
+ "layers.*.mlp.experts.*.up_proj": "colwise",
31
+ "layers.*.mlp.experts.*.down_proj": "rowwise",
32
+ "layers.*.mlp.gate_proj": "colwise",
33
+ "layers.*.mlp.up_proj": "colwise",
34
+ "layers.*.mlp.down_proj": "rowwise",
35
+ }
36
+ base_model_pp_plan = {
37
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
38
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
39
+ "norm": (["hidden_states"], ["hidden_states"]),
40
+ }
41
+
42
+ def __init__(
43
+ self,
44
+ vocab_size=151936,
45
+ hidden_size=2048,
46
+ intermediate_size=5632,
47
+ num_hidden_layers=24,
48
+ num_attention_heads=16,
49
+ num_key_value_heads=16,
50
+ hidden_act="silu",
51
+ max_position_embeddings=128000,
52
+ initializer_range=0.02,
53
+ rms_norm_eps=1e-6,
54
+ use_cache=True,
55
+ tie_word_embeddings=False,
56
+ rope_theta=5000000.0,
57
+ attention_bias=False,
58
+ attention_dropout=0.0,
59
+ decoder_sparse_step=1,
60
+ moe_intermediate_size=1408,
61
+ num_experts_per_tok=4,
62
+ num_experts=60,
63
+ norm_topk_prob=True,
64
+ router_aux_loss_coef=0.001,
65
+ mlp_only_layers=None,
66
+ rope_scaling=None,
67
+ head_dim=None,
68
+ **kwargs,
69
+ ):
70
+ self.vocab_size = vocab_size
71
+ self.max_position_embeddings = max_position_embeddings
72
+ self.hidden_size = hidden_size
73
+ self.intermediate_size = intermediate_size
74
+ self.num_hidden_layers = num_hidden_layers
75
+ self.num_attention_heads = num_attention_heads
76
+
77
+ # for backward compatibility
78
+ if num_key_value_heads is None:
79
+ num_key_value_heads = num_attention_heads
80
+
81
+ self.num_key_value_heads = num_key_value_heads
82
+ self.hidden_act = hidden_act
83
+ self.initializer_range = initializer_range
84
+ self.rms_norm_eps = rms_norm_eps
85
+ self.use_cache = use_cache
86
+ self.rope_theta = rope_theta
87
+ self.attention_bias = attention_bias
88
+ self.attention_dropout = attention_dropout
89
+ self.rope_scaling = rope_scaling
90
+ self.head_dim = head_dim or hidden_size // num_attention_heads
91
+
92
+ rope_config_validation(self, ignore_keys={"fope_init_factor", "fope_sep_head", "num_inv_freq"})
93
+
94
+ # MoE arguments
95
+ self.decoder_sparse_step = decoder_sparse_step
96
+ self.moe_intermediate_size = moe_intermediate_size
97
+ self.num_experts_per_tok = num_experts_per_tok
98
+ self.num_experts = num_experts
99
+ self.norm_topk_prob = norm_topk_prob
100
+ self.router_aux_loss_coef = router_aux_loss_coef
101
+ self.mlp_only_layers = [] if mlp_only_layers is None else mlp_only_layers
102
+
103
+ super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
104
+
105
+
106
+ class InternS1ProVisionConfig(PretrainedConfig):
107
+ model_type = "interns1_pro_vision"
108
+ base_config_key = "vision_config"
109
+
110
+ def __init__(
111
+ self,
112
+ depth=27,
113
+ hidden_size=1152,
114
+ hidden_act="gelu_pytorch_tanh",
115
+ intermediate_size=4304,
116
+ num_heads=16,
117
+ in_channels=3,
118
+ patch_size=16,
119
+ spatial_merge_size=2,
120
+ temporal_patch_size=2,
121
+ out_hidden_size=3584,
122
+ num_position_embeddings=2304,
123
+ initializer_range=0.02,
124
+ **kwargs,
125
+ ):
126
+ super().__init__(**kwargs)
127
+
128
+ self.depth = depth
129
+ self.hidden_size = hidden_size
130
+ self.hidden_act = hidden_act
131
+ self.intermediate_size = intermediate_size
132
+ self.num_heads = num_heads
133
+ self.in_channels = in_channels
134
+ self.patch_size = patch_size
135
+ self.spatial_merge_size = spatial_merge_size
136
+ self.temporal_patch_size = temporal_patch_size
137
+ self.out_hidden_size = out_hidden_size
138
+ self.num_position_embeddings = num_position_embeddings
139
+ self.initializer_range = initializer_range
140
+
141
+
142
+ class InternS1ProConfig(PretrainedConfig):
143
+ model_type = "interns1_pro"
144
+ sub_configs = {"vision_config": InternS1ProVisionConfig, "text_config": InternS1ProTextConfig}
145
+ keys_to_ignore_at_inference = ["past_key_values"]
146
+
147
+ def __init__(
148
+ self,
149
+ text_config=None,
150
+ vision_config=None,
151
+ image_token_id=151655,
152
+ video_token_id=151656,
153
+ vision_start_token_id=151652,
154
+ vision_end_token_id=151653,
155
+ tie_word_embeddings=False,
156
+ **kwargs,
157
+ ):
158
+ if isinstance(vision_config, dict):
159
+ self.vision_config = self.sub_configs["vision_config"](**vision_config)
160
+ elif vision_config is None:
161
+ self.vision_config = self.sub_configs["vision_config"]()
162
+
163
+ if isinstance(text_config, dict):
164
+ self.text_config = self.sub_configs["text_config"](**text_config)
165
+ elif text_config is None:
166
+ self.text_config = self.sub_configs["text_config"]()
167
+
168
+ self.image_token_id = image_token_id
169
+ self.video_token_id = video_token_id
170
+ self.vision_start_token_id = vision_start_token_id
171
+ self.vision_end_token_id = vision_end_token_id
172
+ super().__init__(**kwargs, tie_word_embeddings=tie_word_embeddings)
173
+
174
+
175
+ __all__ = ["InternS1ProConfig", "InternS1ProTextConfig", "InternS1ProVisionConfig"]
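
The configuration module defines three classes: a text config carrying the MoE and RoPE fields (with rope_config_validation told to ignore the FoPE-specific keys), a vision config, and a composite InternS1ProConfig that nests both. A minimal sketch of instantiating them directly, assuming the script runs from a checkout of this folder; handy for building a tiny test variant without config.json:

```python
# Sketch only, not part of this commit: a scaled-down config built from the
# classes defined above (defaults come from their __init__ signatures).
from configuration_interns1_pro import InternS1ProConfig, InternS1ProTextConfig

text_cfg = InternS1ProTextConfig(num_hidden_layers=4, num_experts=8, num_experts_per_tok=2)
cfg = InternS1ProConfig(text_config=text_cfg.to_dict())  # dicts are re-wrapped into sub-configs

print(cfg.text_config.num_experts)            # 8
print(cfg.text_config.moe_intermediate_size)  # 1408 (class default)
print(cfg.image_token_id)                     # 151655
```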
generation_config.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "bos_token_id": 151643,
3
+ "pad_token_id": 151643,
4
+ "do_sample": true,
5
+ "eos_token_id": [
6
+ 151645,
7
+ 151643
8
+ ],
9
+ "top_p": 0.8,
10
+ "top_k": 20,
11
+ "temperature": 0.7,
12
+ "repetition_penalty": 1.0,
13
+ "transformers_version": "4.56.0"
14
+ }
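
generation_config.json sets sampling defaults (temperature 0.7, top_p 0.8, top_k 20) and registers both <|im_end|> (151645) and <|endoftext|> (151643) as EOS ids. A minimal sketch of the equivalent GenerationConfig object, which generate() would otherwise pick up automatically from this file:

```python
# Sketch only, not part of this commit: the defaults above expressed as a
# transformers GenerationConfig, e.g. as a base for overriding single fields.
from transformers import GenerationConfig

gen_cfg = GenerationConfig(
    do_sample=True,
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    repetition_penalty=1.0,
    bos_token_id=151643,
    pad_token_id=151643,
    eos_token_id=[151645, 151643],
)
# model.generate(**inputs, generation_config=gen_cfg) reproduces the repo defaults.
```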
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00005-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ba3a0a2200a94b5fb739e5c1f947a5cc973d0c32540e5438d7bba9ba191d524
3
+ size 12882349840
model-00008-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5d6d62aaea11d97bab2e8e0f773d56b8c5b7a657af36f2bf15d4a440d751586
3
+ size 12882356256
model-00009-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3eb92d259c6d60ad7d304c76699fe5199b69118355484e29740c853aed80a53c
3
+ size 12882349840
model-00010-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce38987d174204b43985ddb31af5fa0ece2d04dd07f2931475486fbcc33e8c61
3
+ size 12882346664
model-00012-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4826c8a0677e01f94d5d9a12667577aadd4f2f5eef67ae0018ecb8cc714d204b
3
+ size 12882349840
model-00013-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a43c5d0d95645668a5067a37c96b90b0b9d284e29a1a1873b78bda791a13b72
3
+ size 12882349736
model-00015-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a81b3600cad8e646eb13b64e32d23226306e20fc87abeb5f85ca37201887e2f
3
+ size 12882349840
model-00016-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9aec461a55056b6e2530e0aba69c57e391d270aede8523787b5fa09ca0123c0f
3
+ size 12882349824
model-00017-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9adb8aaa9f8872188862c47d5660910bb9e8ae49fed316623eb299157ed6203b
3
+ size 12882363688
model-00018-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e20a11d3e8a7b713c4ddb986d5e23fce6b77d791295378ed805e364ffd2e8e2
3
+ size 12882348160
model-00020-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9442f076e3da5539e139790654a2edb250feffacf912a06255dc4745ac83100f
3
+ size 12882363656
model-00021-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cfd04491b80557b33a5e747d0c9ab53502b9eff87d67234bbdc5a0b2d0abdbe
3
+ size 12882349832
model-00022-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:65d48e939a2c2fa93798278ff14b0d785a6814495dccdc050afc4698bc626425
3
+ size 12882349848
model-00023-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:275bc4f41159cf8f79a19ace89f8a771e36d30c7b5f18ca154a237f4390ba57c
3
+ size 12882363664
model-00025-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6a35bfcf777490d622c6dffe9bf2f249e03167c836352b3a04cf883b4422400
3
+ size 12882349848
model-00036-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91356d0ea2a7163330e0c6c19f6a30844db27843d3c4f535614c52046bddbfcc
3
+ size 12882349840
model-00045-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a20c857086e18f99b315d193809856666d3dc93d77239f1e798f20ad92aa3930
3
+ size 12882363752
model-00046-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e88e73359635345ae21496d0d39268a79cc4051562abf2079ab497b3da7c3c8
3
+ size 12882349848
model-00047-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:80abd5710133465ee536f3f088a92dc73d9087fc9185f658ae0e6724ecb6537c
3
+ size 12882349832
model-00049-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dcabfc65485f82f96bda786270607f56d428f1d5b22ab743483abe7014a08461
3
+ size 12882349832
model-00057-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32380b81ad026f05ad4e5302315f709df3d846522bd12f06fb12d6dd4692289c
3
+ size 12882363656
model-00059-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be543c4dfa4d06d005d503407618d5acd9fcff6bf380c55e68f15534fc5c332d
3
+ size 12882349840
model-00061-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83b684d46b7132ac51dcaadb22fe74f22baf8c0657fd271cee68843586026159
3
+ size 12882349848
model-00062-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a951052e55aad53e5066c1075295139c57ada13089548229d8a7ec060b7f471c
3
+ size 12882349832
model-00063-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5bd3d1e3590cfa5bb65eef332070ce99509c23d933ab74099e46abf4ee58262
3
+ size 12882363656
model-00064-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c64156828480ed3b5806d9be8aa6c8372864ba937c313d11713cbce03ff73cc
3
+ size 12882349848
model-00072-of-00072.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e12cc1cc81caf09ae5c32910feaebd8ad31a9d313c8acc596801382507ed8796
3
+ size 4348672640
model.safetensors.index.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7de640c8e6f374c36de64b925b2c107896731ef642283e490e69125ec5c4eac1
3
+ size 32204741
modeling_interns1_pro.py ADDED
@@ -0,0 +1,1703 @@
1
+ # coding=utf-8
2
+ # Copyright 2025 HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from abc import abstractmethod, ABC
17
+ from dataclasses import dataclass
18
+ from typing import Any, Callable, Optional, Union
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.nn.functional as F
23
+
24
+ from transformers.activations import ACT2FN
25
+ from transformers.cache_utils import Cache, DynamicCache
26
+ from transformers.generation import GenerationMixin
27
+ from transformers.integrations import use_kernel_forward_from_hub
28
+ from transformers.masking_utils import create_causal_mask
29
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
30
+ from transformers.modeling_layers import GradientCheckpointingLayer
31
+ from transformers.modeling_outputs import BaseModelOutputWithPast, ModelOutput
32
+ from .modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
33
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
34
+ from transformers.processing_utils import Unpack
35
+ from transformers.utils import TransformersKwargs, auto_docstring, is_torchdynamo_compiling
36
+ from transformers.utils.generic import OutputRecorder, check_model_inputs
37
+ from .configuration_interns1_pro import InternS1ProConfig, InternS1ProTextConfig, InternS1ProVisionConfig
38
+
39
+
40
+ @use_kernel_forward_from_hub("RMSNorm")
41
+ class Qwen3VLMoeTextRMSNorm(nn.Module):
42
+ def __init__(self, hidden_size, eps=1e-6):
43
+ """
44
+ Qwen3VLMoeTextRMSNorm is equivalent to T5LayerNorm
45
+ """
46
+ super().__init__()
47
+ self.weight = nn.Parameter(torch.ones(hidden_size))
48
+ self.variance_epsilon = eps
49
+
50
+ def forward(self, hidden_states):
51
+ input_dtype = hidden_states.dtype
52
+ hidden_states = hidden_states.to(torch.float32)
53
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
54
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
55
+ return self.weight * hidden_states.to(input_dtype)
56
+
57
+ def extra_repr(self):
58
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
59
+
60
+
61
+ class InternS1ProMoeTextSparseMoeBlock(nn.Module):
62
+ def __init__(self, config):
63
+ super().__init__()
64
+ self.hidden_size = config.hidden_size
65
+ self.num_experts = config.num_experts
66
+ self.top_k = config.num_experts_per_tok
67
+ # self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
68
+ if hasattr(config, "router_n_groups") and config.router_n_groups > 1:
69
+ self.gate = InternS1ProMoeTextGroupedRouter(config)
70
+ else:
71
+ self.gate = Qwen3VLMoeTextTopKRouter(config)
72
+ self.experts = nn.ModuleList(
73
+ [Qwen3VLMoeTextMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(self.num_experts)]
74
+ )
75
+
76
+ # since all the models use norm_topk_prob, we don't need an extra check for it
77
+ # self.norm_topk_prob = config.norm_topk_prob
78
+
79
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
80
+ batch_size = hidden_states.shape[0]
81
+
82
+ router_logits, routing_weights, router_indices = self.gate(hidden_states)
83
+
84
+ hidden_states = hidden_states.reshape(batch_size, -1, self.hidden_size)
85
+ hidden_states = hidden_states.reshape(-1, self.hidden_size)
86
+ final_hidden_states = torch.zeros(
87
+ (hidden_states.shape[0], self.hidden_size),
88
+ dtype=hidden_states.dtype,
89
+ device=hidden_states.device,
90
+ )
91
+
92
+ expert_mask = torch.nn.functional.one_hot(router_indices, num_classes=self.num_experts)
93
+ expert_mask = expert_mask.permute(2, 1, 0)
94
+ for expert_idx in range(self.num_experts):
95
+ idx, top_x = torch.where(expert_mask[expert_idx])
96
+ if top_x.numel() == 0:
97
+ continue
98
+ expert_layer = self.experts[expert_idx]
99
+ current_state = hidden_states[top_x]
100
+ current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
101
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
102
+ final_hidden_states = final_hidden_states.reshape(batch_size, -1, self.hidden_size)
103
+ return final_hidden_states
104
+
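+ # Dispatch sketch (hypothetical sizes, not taken from the shipped config): with num_experts=8
+ # and top_k=2, the gate returns two expert indices and two normalized weights per token.
+ # `expert_mask` is a one-hot over experts, so torch.where recovers, for each expert, which
+ # flattened tokens (top_x) were routed to it and in which top-k slot (idx); each expert then
+ # processes only its own tokens and index_add_ accumulates the weighted expert outputs back
+ # into the flat (num_tokens, hidden_size) buffer.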
105
+
106
+ def rotate_half(x):
107
+ """Rotates half the hidden dims of the input."""
108
+ x1 = x[..., : x.shape[-1] // 2]
109
+ x2 = x[..., x.shape[-1] // 2 :]
110
+ return torch.cat((-x2, x1), dim=-1)
111
+
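+ # Illustrative example (hypothetical values): for x = [x0, x1, x2, x3] along the last dim,
+ # rotate_half(x) = [-x2, -x3, x0, x1]; together with the cos/sin terms used in
+ # apply_rotary_pos_emb below, this implements the pairwise rotation of RoPE.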
112
+
113
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
114
+ """
115
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
116
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
117
+ """
118
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
119
+ if n_rep == 1:
120
+ return hidden_states
121
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
122
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
123
+
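+ # Shape sketch (hypothetical sizes): with num_key_value_heads=2 and n_rep=4, a K/V tensor of
+ # shape (1, 2, 8, 64) becomes (1, 8, 8, 64) -- each KV head is repeated n_rep times so that
+ # grouped-query attention can share the same keys/values across several query heads.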
124
+
125
+ def eager_attention_forward(
126
+ module: nn.Module,
127
+ query: torch.Tensor,
128
+ key: torch.Tensor,
129
+ value: torch.Tensor,
130
+ attention_mask: Optional[torch.Tensor],
131
+ scaling: float,
132
+ dropout: float = 0.0,
133
+ **kwargs: Unpack[TransformersKwargs],
134
+ ):
135
+ key_states = repeat_kv(key, module.num_key_value_groups)
136
+ value_states = repeat_kv(value, module.num_key_value_groups)
137
+
138
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
139
+ if attention_mask is not None:
140
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
141
+ attn_weights = attn_weights + causal_mask
142
+
143
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
144
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
145
+ attn_output = torch.matmul(attn_weights, value_states)
146
+ attn_output = attn_output.transpose(1, 2).contiguous()
147
+
148
+ return attn_output, attn_weights
149
+
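+ # Eager path sketch: attn = softmax((Q @ K^T) * scaling + additive_mask) @ V, with the
+ # softmax forced to float32 and optional dropout; non-eager backends (sdpa, flash-attention)
+ # are dispatched through ALL_ATTENTION_FUNCTIONS instead of this function.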
150
+
151
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
152
+ """Applies Rotary Position Embedding to the query and key tensors.
153
+
154
+ Args:
155
+ q (`torch.Tensor`): The query tensor.
156
+ k (`torch.Tensor`): The key tensor.
157
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
158
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
159
+ position_ids (`torch.Tensor`, *optional*):
160
+ Deprecated and unused.
161
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
162
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
163
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
164
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
165
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
166
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
167
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
168
+ Returns:
169
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
170
+ """
171
+ cos = cos.unsqueeze(unsqueeze_dim)
172
+ sin = sin.unsqueeze(unsqueeze_dim)
173
+ q_embed = (q * cos) + (rotate_half(q) * sin)
174
+ k_embed = (k * cos) + (rotate_half(k) * sin)
175
+ return q_embed, k_embed
176
+
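+ # Usage sketch (hypothetical shapes): with q, k of shape (batch, heads, seq, head_dim) and
+ # cos, sin of shape (batch, seq, head_dim), the default unsqueeze_dim=1 expands cos/sin to
+ # (batch, 1, seq, head_dim) so they broadcast over the head axis of q and k.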
177
+
178
+ def apply_rotary_pos_emb_sep(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
179
+ """Applies Rotary Position Embedding to the query and key tensors with separate head handling for FoPE.
180
+
181
+ Args:
182
+ q (`torch.Tensor`): The query tensor.
183
+ k (`torch.Tensor`): The key tensor.
184
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
185
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
186
+ position_ids (`torch.Tensor`):
187
+ The position indices of the tokens corresponding to the query and key tensors.
188
+ Returns:
189
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
190
+ """
191
+ num_groups = int(q.shape[unsqueeze_dim] // cos.shape[unsqueeze_dim])
192
+ cos_rep = repeat_kv(cos, num_groups)
193
+ sin_rep = repeat_kv(sin, num_groups)
194
+ q_embed = (q * cos_rep) + (rotate_half(q) * sin_rep)
195
+ k_embed = (k * cos) + (rotate_half(k) * sin)
196
+ return q_embed, k_embed
197
+
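+ # Separate-head path (FoPE): here cos/sin already carry a per-KV-head axis, so they are
+ # expanded with repeat_kv to match the number of query heads before rotating q, while k
+ # (which only has num_key_value_heads) is rotated with the unexpanded cos/sin.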
198
+
199
+ class InternS1ProMoeTextAttention(nn.Module):
200
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
201
+
202
+ def __init__(self, config: InternS1ProTextConfig, layer_idx: int):
203
+ super().__init__()
204
+ self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
205
+ self.config = config
206
+ self.layer_idx = layer_idx
207
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
208
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
209
+ self.scaling = self.head_dim**-0.5
210
+ self.attention_dropout = config.attention_dropout
211
+ self.is_causal = True
212
+
213
+ self.q_proj = nn.Linear(
214
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
215
+ )
216
+ self.k_proj = nn.Linear(
217
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
218
+ )
219
+ self.v_proj = nn.Linear(
220
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
221
+ )
222
+ self.o_proj = nn.Linear(
223
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
224
+ )
225
+ self.q_norm = Qwen3VLMoeTextRMSNorm(
226
+ self.head_dim, eps=config.rms_norm_eps
227
+ ) # unlike olmo, only on the head dim!
228
+ self.k_norm = Qwen3VLMoeTextRMSNorm(
229
+ self.head_dim, eps=config.rms_norm_eps
230
+ ) # thus post q_norm does not need reshape
231
+
232
+ # Check if FoPE is enabled
233
+ self.use_fope = False
234
+ self.fope_sep_head = False
235
+ if config.rope_scaling is not None:
236
+ self.use_fope = (
237
+ config.rope_scaling.get("fope_init_factor", None) is not None
238
+ or config.rope_scaling.get("fope_sep_head", None) is not None
239
+ or config.rope_scaling.get("num_inv_freq", None) is not None
240
+ )
241
+ self.fope_sep_head = config.rope_scaling.get("fope_sep_head", False)
242
+
243
+ def forward(
244
+ self,
245
+ hidden_states: torch.Tensor,
246
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
247
+ attention_mask: Optional[torch.Tensor],
248
+ past_key_values: Optional[Cache] = None,
249
+ cache_position: Optional[torch.LongTensor] = None,
250
+ **kwargs: Unpack[FlashAttentionKwargs],
251
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
252
+ input_shape = hidden_states.shape[:-1]
253
+ hidden_shape = (*input_shape, -1, self.head_dim)
254
+
255
+ query_states = self.q_norm(self.q_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
256
+ key_states = self.k_norm(self.k_proj(hidden_states).view(hidden_shape)).transpose(1, 2)
257
+ value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
258
+
259
+ cos, sin = position_embeddings
260
+ if self.use_fope and self.fope_sep_head:
261
+ query_states, key_states = apply_rotary_pos_emb_sep(query_states, key_states, cos, sin)
262
+ else:
263
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
264
+
265
+ if past_key_values is not None:
266
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
267
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
268
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
269
+
270
+ attention_interface: Callable = eager_attention_forward
271
+ if self.config._attn_implementation != "eager":
272
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
273
+
274
+ attn_output, attn_weights = attention_interface(
275
+ self,
276
+ query_states,
277
+ key_states,
278
+ value_states,
279
+ attention_mask,
280
+ dropout=0.0 if not self.training else self.attention_dropout,
281
+ scaling=self.scaling,
282
+ **kwargs,
283
+ )
284
+
285
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
286
+ attn_output = self.o_proj(attn_output)
287
+ return attn_output, attn_weights
288
+
289
+
290
+ class Qwen3VLMoeTextMLP(nn.Module):
291
+ def __init__(self, config, intermediate_size=None):
292
+ super().__init__()
293
+ self.config = config
294
+ self.hidden_size = config.hidden_size
295
+ self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
296
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
297
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
298
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
299
+ self.act_fn = ACT2FN[config.hidden_act]
300
+
301
+ def forward(self, x):
302
+ down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
303
+ return down_proj
304
+
305
+
306
+ class Qwen3VLMoeTextRouter(nn.Module, ABC):
307
+ """
308
+ Abstract base class for MoE routers.
309
+
310
+ Why this class exists:
311
+ - We have multiple router implementations selected by config (e.g. TopK vs Grouped routing).
312
+ - HuggingFace `PreTrainedModel._can_record_outputs` / `OutputRecorder` expects a *single* target module class
313
+ to match when recording intermediate outputs (it does not reliably support passing a tuple/union of classes).
314
+ - Defining a shared base class lets us set:
315
+ OutputRecorder(Qwen3VLMoeTextRouter, layer_name="mlp.gate", index=0)
316
+ so both `Qwen3VLMoeTextTopKRouter` and `Qwen3VLMoeTextGroupedRouter` are recordable through the same hook,
317
+ while still keeping the implementation-specific routing logic in subclasses.
318
+ """
319
+ def __init__(self, config):
320
+ super().__init__()
321
+
322
+ @abstractmethod
323
+ def forward(self, hidden_states):
324
+ pass
325
+
326
+
327
+ class Qwen3VLMoeTextTopKRouter(Qwen3VLMoeTextRouter):
328
+ def __init__(self, config):
329
+ super().__init__(config)
330
+ self.top_k = config.num_experts_per_tok
331
+ self.num_experts = config.num_experts
332
+ self.norm_topk_prob = config.norm_topk_prob
333
+ self.hidden_dim = config.hidden_size
334
+ self.weight = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim))
335
+
336
+ def forward(self, hidden_states):
337
+ hidden_states = hidden_states.reshape(-1, self.hidden_dim)
338
+ router_logits = F.linear(hidden_states, self.weight) # (seq_len, num_experts)
339
+ routing_weights = torch.nn.functional.softmax(router_logits, dtype=torch.float, dim=-1)
340
+ router_top_value, router_indices = torch.topk(routing_weights, self.top_k, dim=-1) # (seq_len, top_k)
341
+ if self.norm_topk_prob:
342
+ router_top_value /= router_top_value.sum(dim=-1, keepdim=True)
343
+ router_top_value = router_top_value.to(router_logits.dtype)
344
+ router_scores = router_top_value
345
+ return router_logits, router_scores, router_indices
346
+
347
+
348
+ class InternS1ProMoeTextGroupedRouter(Qwen3VLMoeTextRouter):
349
+ def __init__(self, config):
350
+ super().__init__(config)
351
+ self.top_k = config.num_experts_per_tok
352
+ self.num_experts = config.num_experts
353
+ self.norm_topk_prob = config.norm_topk_prob
354
+ self.hidden_dim = config.hidden_size
355
+ self.weight = nn.Parameter(torch.zeros(self.num_experts, self.hidden_dim))
356
+
357
+ self.router_n_groups = config.router_n_groups
358
+
359
+ def forward(self, hidden_states):
360
+ hidden_states = hidden_states.reshape(-1, self.hidden_dim)
361
+ router_logits = F.linear(hidden_states, self.weight) # (seq_len, num_experts)
362
+ routing_weights = torch.nn.functional.softmax(router_logits, dtype=torch.float, dim=-1)
363
+
364
+ # group-based selection
365
+ assert self.num_experts % self.router_n_groups == 0, f"num_experts ({self.num_experts}) must be divisible by router_n_groups ({self.router_n_groups})"
366
+ group_size = max(1, self.num_experts // self.router_n_groups)
367
+ seq_len = hidden_states.shape[0]
368
+ routing_weights = routing_weights.view(seq_len, self.router_n_groups, group_size)
369
+
370
+ # [seq, n_groups, top_k_per_group]
371
+ group_local_max_idx = torch.topk(routing_weights, k=self.top_k // self.router_n_groups, dim=2)[1]
372
+ # [1, n_groups, 1]
373
+ group_offsets = (torch.arange(self.router_n_groups, device=routing_weights.device) * group_size).view(1, -1, 1)
374
+
375
+ router_indices = (group_local_max_idx + group_offsets).to(torch.long) # [seq, n_groups, top_k_per_group]
376
+ routing_weights = routing_weights.view(seq_len, self.num_experts)
377
+ router_indices = router_indices.view(seq_len, self.top_k) # [seq, top_k]
378
+ router_top_value = routing_weights.gather(1, router_indices) # [seq, top_k]
379
+
380
+ if self.norm_topk_prob:
381
+ denominator = router_top_value.sum(dim=-1, keepdim=True) + 1e-20
382
+ router_top_value = router_top_value / denominator
383
+
384
+ router_top_value = router_top_value.to(router_logits.dtype)
385
+ router_scores = router_top_value
386
+ return router_logits, router_scores, router_indices
387
+
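+ # Grouped-routing sketch (hypothetical config, not the shipped one): with num_experts=8,
+ # router_n_groups=4 and num_experts_per_tok=4, each token picks top_k // n_groups = 1 expert
+ # inside every group of 8 // 4 = 2 consecutive experts, e.g. indices [1, 2, 5, 6]; the
+ # gathered probabilities are then renormalized over the selected experts when
+ # norm_topk_prob is set.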
388
+
389
+ class InternS1ProMoeTextDecoderLayer(GradientCheckpointingLayer):
390
+ def __init__(self, config: InternS1ProTextConfig, layer_idx: int):
391
+ super().__init__()
392
+ self.self_attn = InternS1ProMoeTextAttention(config, layer_idx)
393
+ if (layer_idx not in config.mlp_only_layers) and (
394
+ config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
395
+ ):
396
+ self.mlp = InternS1ProMoeTextSparseMoeBlock(config)
397
+ else:
398
+ self.mlp = Qwen3VLMoeTextMLP(config, intermediate_size=config.intermediate_size)
399
+ self.input_layernorm = Qwen3VLMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
400
+ self.post_attention_layernorm = Qwen3VLMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
401
+ self.hidden_size = config.hidden_size
402
+
403
+ def forward(
404
+ self,
405
+ hidden_states: torch.Tensor,
406
+ attention_mask: Optional[torch.Tensor] = None,
407
+ position_ids: Optional[torch.LongTensor] = None,
408
+ past_key_values: Optional[Cache] = None,
409
+ use_cache: Optional[bool] = False,
410
+ cache_position: Optional[torch.LongTensor] = None,
411
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
412
+ **kwargs: Unpack[TransformersKwargs],
413
+ ) -> torch.Tensor:
414
+ residual = hidden_states
415
+ hidden_states = self.input_layernorm(hidden_states)
416
+ # Self Attention
417
+ hidden_states, _ = self.self_attn(
418
+ hidden_states=hidden_states,
419
+ attention_mask=attention_mask,
420
+ position_ids=position_ids,
421
+ past_key_values=past_key_values,
422
+ use_cache=use_cache,
423
+ cache_position=cache_position,
424
+ position_embeddings=position_embeddings,
425
+ **kwargs,
426
+ )
427
+ hidden_states = residual + hidden_states
428
+
429
+ # Fully Connected
430
+ residual = hidden_states
431
+ hidden_states = self.post_attention_layernorm(hidden_states)
432
+ hidden_states = self.mlp(hidden_states)
433
+ hidden_states = residual + hidden_states
434
+ return hidden_states
435
+
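+ # Layer structure sketch: pre-norm residual blocks, i.e. x = x + Attn(RMSNorm(x)) followed by
+ # x = x + MLP(RMSNorm(x)), where MLP is either the dense gated MLP or the sparse MoE block
+ # depending on mlp_only_layers / decoder_sparse_step for this layer index.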
436
+
437
+ @auto_docstring
438
+ class InternS1ProPreTrainedModel(PreTrainedModel):
439
+ config: InternS1ProConfig
440
+ base_model_prefix = "model"
441
+ supports_gradient_checkpointing = True
442
+ _no_split_modules = ["InternS1ProMoeTextDecoderLayer", "Qwen3VLMoeVisionBlock"]
443
+ _skip_keys_device_placement = ["past_key_values"]
444
+ _supports_flash_attn = True
445
+ _supports_sdpa = True
446
+ _supports_flex_attn = True
447
+ _can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
448
+ _supports_attention_backend = True
449
+ _can_record_outputs = {
450
+ "router_logits": OutputRecorder(Qwen3VLMoeTextRouter, layer_name="mlp.gate", index=0),
451
+ "hidden_states": InternS1ProMoeTextDecoderLayer,
452
+ "attentions": InternS1ProMoeTextAttention,
453
+ }
454
+
455
+ def _init_weights(self, module):
456
+ """Initialize the weights."""
457
+ super()._init_weights(module)
458
+ if hasattr(self.config, "initializer_range"):
459
+ std = self.config.initializer_range
460
+ else:
461
+ std = getattr(self.config.get_text_config(), "initializer_range", 0.02)
462
+ if isinstance(module, InternS1ProMoeTextSparseMoeBlock):
463
+ for expert in module.experts:
464
+ expert.gate_proj.weight.data.normal_(mean=0.0, std=std)
465
+ expert.up_proj.weight.data.normal_(mean=0.0, std=std)
466
+ expert.down_proj.weight.data.normal_(mean=0.0, std=std)
467
+
468
+
469
+ class Qwen3VLMoeVisionMLP(nn.Module):
470
+ def __init__(self, config):
471
+ super().__init__()
472
+ self.hidden_size = config.hidden_size
473
+ self.intermediate_size = config.intermediate_size
474
+ self.linear_fc1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
475
+ self.linear_fc2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=True)
476
+ self.act_fn = ACT2FN[config.hidden_act]
477
+
478
+ def forward(self, hidden_state):
479
+ return self.linear_fc2(self.act_fn(self.linear_fc1(hidden_state)))
480
+
481
+
482
+ class Qwen3VLMoeVisionPatchEmbed(nn.Module):
483
+ def __init__(self, config) -> None:
484
+ super().__init__()
485
+ self.patch_size = config.patch_size
486
+ self.temporal_patch_size = config.temporal_patch_size
487
+ self.in_channels = config.in_channels
488
+ self.embed_dim = config.hidden_size
489
+
490
+ kernel_size = [self.temporal_patch_size, self.patch_size, self.patch_size]
491
+ self.proj = nn.Conv3d(self.in_channels, self.embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=True)
492
+
493
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
494
+ target_dtype = self.proj.weight.dtype
495
+ hidden_states = hidden_states.view(
496
+ -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size
497
+ )
498
+ hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim)
499
+ return hidden_states
500
+
501
+
502
+ class Qwen3VLMoeVisionRotaryEmbedding(nn.Module):
503
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
504
+
505
+ def __init__(self, dim: int, theta: float = 10000.0) -> None:
506
+ super().__init__()
507
+ inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
508
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
509
+
510
+ def forward(self, seqlen: int) -> torch.Tensor:
511
+ seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
512
+ freqs = torch.outer(seq, self.inv_freq)
513
+ return freqs
514
+
515
+
516
+ class Qwen3VLMoeVisionPatchMerger(nn.Module):
517
+ def __init__(self, config: InternS1ProVisionConfig, use_postshuffle_norm=False) -> None:
518
+ super().__init__()
519
+ self.hidden_size = config.hidden_size * (config.spatial_merge_size**2)
520
+ self.use_postshuffle_norm = use_postshuffle_norm
521
+ self.norm = nn.LayerNorm(self.hidden_size if use_postshuffle_norm else config.hidden_size, eps=1e-6)
522
+ self.linear_fc1 = nn.Linear(self.hidden_size, self.hidden_size)
523
+ self.act_fn = nn.GELU()
524
+ self.linear_fc2 = nn.Linear(self.hidden_size, config.out_hidden_size)
525
+
526
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
527
+ x = self.norm(x.view(-1, self.hidden_size) if self.use_postshuffle_norm else x).view(-1, self.hidden_size)
528
+ x = self.linear_fc2(self.act_fn(self.linear_fc1(x)))
529
+ return x
530
+
531
+
532
+ def apply_rotary_pos_emb_vision(
533
+ q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
534
+ ) -> tuple[torch.Tensor, torch.Tensor]:
535
+ orig_q_dtype = q.dtype
536
+ orig_k_dtype = k.dtype
537
+ q, k = q.float(), k.float()
538
+ cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
539
+ q_embed = (q * cos) + (rotate_half(q) * sin)
540
+ k_embed = (k * cos) + (rotate_half(k) * sin)
541
+ q_embed = q_embed.to(orig_q_dtype)
542
+ k_embed = k_embed.to(orig_k_dtype)
543
+ return q_embed, k_embed
544
+
545
+
546
+ class Qwen3VLMoeVisionAttention(nn.Module):
547
+ def __init__(self, config: InternS1ProVisionConfig) -> None:
548
+ super().__init__()
549
+ self.dim = config.hidden_size
550
+ self.num_heads = config.num_heads
551
+ self.head_dim = self.dim // self.num_heads
552
+ self.num_key_value_groups = 1 # needed for eager attention
553
+ self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True)
554
+ self.proj = nn.Linear(self.dim, self.dim)
555
+ self.scaling = self.head_dim**-0.5
556
+ self.config = config
557
+ self.attention_dropout = 0.0
558
+ self.is_causal = False
559
+
560
+ def forward(
561
+ self,
562
+ hidden_states: torch.Tensor,
563
+ cu_seqlens: torch.Tensor,
564
+ rotary_pos_emb: Optional[torch.Tensor] = None,
565
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
566
+ **kwargs,
567
+ ) -> torch.Tensor:
568
+ seq_length = hidden_states.shape[0]
569
+ query_states, key_states, value_states = (
570
+ self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
571
+ )
572
+ cos, sin = position_embeddings
573
+ query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
574
+
575
+ query_states = query_states.transpose(0, 1).unsqueeze(0)
576
+ key_states = key_states.transpose(0, 1).unsqueeze(0)
577
+ value_states = value_states.transpose(0, 1).unsqueeze(0)
578
+
579
+ attention_interface: Callable = eager_attention_forward
580
+ if self.config._attn_implementation != "eager":
581
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
582
+
583
+ if self.config._attn_implementation == "flash_attention_2":
584
+ # Flash Attention 2: Use cu_seqlens for variable length attention
585
+ max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
586
+ attn_output, _ = attention_interface(
587
+ self,
588
+ query_states,
589
+ key_states,
590
+ value_states,
591
+ attention_mask=None,
592
+ scaling=self.scaling,
593
+ dropout=0.0 if not self.training else self.attention_dropout,
594
+ cu_seq_lens_q=cu_seqlens,
595
+ cu_seq_lens_k=cu_seqlens,
596
+ max_length_q=max_seqlen,
597
+ max_length_k=max_seqlen,
598
+ is_causal=False,
599
+ **kwargs,
600
+ )
601
+ else:
602
+ # Other implementations: Process each chunk separately
603
+ lengths = cu_seqlens[1:] - cu_seqlens[:-1]
604
+ splits = [
605
+ torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
606
+ ]
607
+
608
+ attn_outputs = [
609
+ attention_interface(
610
+ self,
611
+ q,
612
+ k,
613
+ v,
614
+ attention_mask=None,
615
+ scaling=self.scaling,
616
+ dropout=0.0 if not self.training else self.attention_dropout,
617
+ is_causal=False,
618
+ **kwargs,
619
+ )[0]
620
+ for q, k, v in zip(*splits)
621
+ ]
622
+ attn_output = torch.cat(attn_outputs, dim=1)
623
+
624
+ attn_output = attn_output.reshape(seq_length, -1).contiguous()
625
+ attn_output = self.proj(attn_output)
626
+ return attn_output
627
+
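+ # Packed-sequence attention: all images/videos are concatenated into one long token sequence
+ # and cu_seqlens marks the boundaries. The flash-attention backend consumes cu_seqlens
+ # directly (varlen kernel); other backends split the packed q/k/v per image and run full,
+ # non-causal attention on each chunk independently before re-concatenating.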
628
+
629
+ class Qwen3VLMoeVisionBlock(GradientCheckpointingLayer):
630
+ def __init__(self, config, attn_implementation: str = "sdpa") -> None:
631
+ super().__init__()
632
+ self.norm1 = nn.LayerNorm(config.hidden_size, eps=1e-6)
633
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=1e-6)
634
+ self.attn = Qwen3VLMoeVisionAttention(config=config)
635
+ self.mlp = Qwen3VLMoeVisionMLP(config=config)
636
+
637
+ def forward(
638
+ self,
639
+ hidden_states: torch.Tensor,
640
+ cu_seqlens: torch.Tensor,
641
+ rotary_pos_emb: Optional[torch.Tensor] = None,
642
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
643
+ **kwargs,
644
+ ) -> torch.Tensor:
645
+ hidden_states = hidden_states + self.attn(
646
+ self.norm1(hidden_states),
647
+ cu_seqlens=cu_seqlens,
648
+ rotary_pos_emb=rotary_pos_emb,
649
+ position_embeddings=position_embeddings,
650
+ **kwargs,
651
+ )
652
+ hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
653
+ return hidden_states
654
+
655
+
656
+ class InternS1ProVisionModel(InternS1ProPreTrainedModel):
657
+ config: InternS1ProVisionConfig
658
+ _no_split_modules = ["Qwen3VLMoeVisionBlock"]
659
+
660
+ def __init__(self, config, *inputs, **kwargs) -> None:
661
+ super().__init__(config, *inputs, **kwargs)
662
+ self.spatial_merge_size = config.spatial_merge_size
663
+ self.patch_size = config.patch_size
664
+ self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size
665
+
666
+ self.patch_embed = Qwen3VLMoeVisionPatchEmbed(
667
+ config=config,
668
+ )
669
+
670
+ self.pos_embed = nn.Embedding(config.num_position_embeddings, config.hidden_size)
671
+ self.num_grid_per_side = int(config.num_position_embeddings**0.5)
672
+
673
+ head_dim = config.hidden_size // config.num_heads
674
+ self.rotary_pos_emb = Qwen3VLMoeVisionRotaryEmbedding(head_dim // 2)
675
+
676
+ self.blocks = nn.ModuleList([Qwen3VLMoeVisionBlock(config) for _ in range(config.depth)])
677
+ self.merger = Qwen3VLMoeVisionPatchMerger(
678
+ config=config,
679
+ use_postshuffle_norm=False,
680
+ )
681
+
682
+ self.gradient_checkpointing = False
683
+
684
+ def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
685
+ merge_size = self.spatial_merge_size
686
+
687
+ max_hw = int(grid_thw[:, 1:].max().item())
688
+ freq_table = self.rotary_pos_emb(max_hw) # (max_hw, dim // 2)
689
+ device = freq_table.device
690
+
691
+ total_tokens = int(torch.prod(grid_thw, dim=1).sum().item())
692
+ pos_ids = torch.empty((total_tokens, 2), dtype=torch.long, device=device)
693
+
694
+ offset = 0
695
+ for num_frames, height, width in grid_thw:
696
+ merged_h, merged_w = height // merge_size, width // merge_size
697
+
698
+ block_rows = torch.arange(merged_h, device=device) # block row indices
699
+ block_cols = torch.arange(merged_w, device=device) # block col indices
700
+ intra_row = torch.arange(merge_size, device=device) # intra-block row offsets
701
+ intra_col = torch.arange(merge_size, device=device) # intra-block col offsets
702
+
703
+ # Compute full-resolution positions
704
+ row_idx = block_rows[:, None, None, None] * merge_size + intra_row[None, None, :, None]
705
+ col_idx = block_cols[None, :, None, None] * merge_size + intra_col[None, None, None, :]
706
+
707
+ row_idx = row_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
708
+ col_idx = col_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
709
+
710
+ coords = torch.stack((row_idx, col_idx), dim=-1)
711
+
712
+ if num_frames > 1:
713
+ coords = coords.repeat(num_frames, 1)
714
+
715
+ num_tokens = coords.shape[0]
716
+ pos_ids[offset : offset + num_tokens] = coords
717
+ offset += num_tokens
718
+
719
+ embeddings = freq_table[pos_ids] # lookup rotary embeddings
720
+ embeddings = embeddings.flatten(1)
721
+ return embeddings
722
+
723
+ def fast_pos_embed_interpolate(self, grid_thw):
724
+ grid_ts, grid_hs, grid_ws = grid_thw[:, 0], grid_thw[:, 1], grid_thw[:, 2]
725
+ device = grid_thw.device
726
+
727
+ idx_list = [[] for _ in range(4)]
728
+ weight_list = [[] for _ in range(4)]
729
+
730
+ for t, h, w in zip(grid_ts, grid_hs, grid_ws):
731
+ h_idxs = torch.linspace(0, self.num_grid_per_side - 1, h)
732
+ w_idxs = torch.linspace(0, self.num_grid_per_side - 1, w)
733
+
734
+ h_idxs_floor = h_idxs.int()
735
+ w_idxs_floor = w_idxs.int()
736
+ h_idxs_ceil = (h_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
737
+ w_idxs_ceil = (w_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
738
+
739
+ dh = h_idxs - h_idxs_floor
740
+ dw = w_idxs - w_idxs_floor
741
+
742
+ base_h = h_idxs_floor * self.num_grid_per_side
743
+ base_h_ceil = h_idxs_ceil * self.num_grid_per_side
744
+
745
+ indices = [
746
+ (base_h[None].T + w_idxs_floor[None]).flatten(),
747
+ (base_h[None].T + w_idxs_ceil[None]).flatten(),
748
+ (base_h_ceil[None].T + w_idxs_floor[None]).flatten(),
749
+ (base_h_ceil[None].T + w_idxs_ceil[None]).flatten(),
750
+ ]
751
+
752
+ weights = [
753
+ ((1 - dh)[None].T * (1 - dw)[None]).flatten(),
754
+ ((1 - dh)[None].T * dw[None]).flatten(),
755
+ (dh[None].T * (1 - dw)[None]).flatten(),
756
+ (dh[None].T * dw[None]).flatten(),
757
+ ]
758
+
759
+ for i in range(4):
760
+ idx_list[i].extend(indices[i].tolist())
761
+ weight_list[i].extend(weights[i].tolist())
762
+
763
+ idx_tensor = torch.tensor(idx_list, dtype=torch.long, device=device)
764
+ weight_tensor = torch.tensor(weight_list, dtype=self.pos_embed.weight.dtype, device=device)
765
+ pos_embeds = self.pos_embed(idx_tensor).to(device) * weight_tensor[:, :, None]
766
+ patch_pos_embeds = pos_embeds[0] + pos_embeds[1] + pos_embeds[2] + pos_embeds[3]
767
+
768
+ patch_pos_embeds = patch_pos_embeds.split([h * w for h, w in zip(grid_hs, grid_ws)])
769
+
770
+ patch_pos_embeds_permute = []
771
+ merge_size = self.config.spatial_merge_size
772
+ for pos_embed, t, h, w in zip(patch_pos_embeds, grid_ts, grid_hs, grid_ws):
773
+ pos_embed = pos_embed.repeat(t, 1)
774
+ pos_embed = (
775
+ pos_embed.view(t, h // merge_size, merge_size, w // merge_size, merge_size, -1)
776
+ .permute(0, 1, 3, 2, 4, 5)
777
+ .flatten(0, 4)
778
+ )
779
+ patch_pos_embeds_permute.append(pos_embed)
780
+ patch_pos_embeds = torch.cat(patch_pos_embeds_permute)
781
+ return patch_pos_embeds
782
+
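+ # Interpolation sketch: for each image, the fixed num_grid_per_side x num_grid_per_side
+ # embedding table is bilinearly resampled to the image's (h, w) patch grid -- the four
+ # floor/ceil index sets and their (1-dh)/(1-dw)-style weights above are exactly the bilinear
+ # corner terms -- and the result is reordered into spatial_merge_size blocks to match the
+ # patch ordering used by the merger.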
783
+ def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
784
+ """
785
+ Args:
786
+ hidden_states (`torch.Tensor` of shape `(seq_len, in_channels * temporal_patch_size * patch_size * patch_size)`):
787
+ Packed, flattened image patches from the image processor (not hidden states of a previous layer).
788
+ grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
789
+ The temporal, height and width of feature shape of each image in LLM.
790
+
791
+ Returns:
792
+ `torch.Tensor`: hidden_states.
793
+ """
794
+ hidden_states = self.patch_embed(hidden_states)
795
+
796
+ pos_embeds = self.fast_pos_embed_interpolate(grid_thw)
797
+ hidden_states = hidden_states + pos_embeds
798
+
799
+ rotary_pos_emb = self.rot_pos_emb(grid_thw)
800
+
801
+ seq_len, _ = hidden_states.size()
802
+ hidden_states = hidden_states.reshape(seq_len, -1)
803
+ rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
804
+ emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
805
+ position_embeddings = (emb.cos(), emb.sin())
806
+
807
+ cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
808
+ dim=0,
809
+ # Select dtype based on the following factors:
810
+ # - FA2 requires that cu_seqlens_q must have dtype int32
811
+ # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
812
+ # See https://github.com/huggingface/transformers/pull/34852 for more information
813
+ dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
814
+ )
815
+ cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
816
+
817
+ for layer_num, blk in enumerate(self.blocks):
818
+ hidden_states = blk(
819
+ hidden_states,
820
+ cu_seqlens=cu_seqlens,
821
+ position_embeddings=position_embeddings,
822
+ **kwargs,
823
+ )
824
+
825
+ hidden_states = self.merger(hidden_states)
826
+
827
+ return hidden_states
828
+
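+ # Vision pipeline sketch: Conv3d patch embedding -> interpolated learned position embeddings
+ # plus 2D rotary embeddings -> `depth` transformer blocks over the packed patch sequence
+ # (boundaries given by cu_seqlens) -> patch merger, which folds each
+ # spatial_merge_size x spatial_merge_size block of patches into one out_hidden_size token.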
829
+
830
+ class Qwen3VLMoeTextRotaryEmbedding(nn.Module):
831
+ def __init__(self, config: InternS1ProConfig, device=None):
832
+ super().__init__()
833
+ # BC: "rope_type" was originally "type"
834
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
835
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
836
+ else:
837
+ self.rope_type = "default"
838
+ self.max_seq_len_cached = config.max_position_embeddings
839
+ self.original_max_seq_len = config.max_position_embeddings
840
+
841
+ self.config = config
842
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
843
+
844
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
845
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
846
+ self.original_inv_freq = self.inv_freq
847
+
848
+ @torch.no_grad()
849
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
850
+ def forward(self, x, position_ids):
851
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
852
+ position_ids_expanded = position_ids[:, None, :].float()
853
+
854
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
855
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
856
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
857
+ emb = torch.cat((freqs, freqs), dim=-1)
858
+ cos = emb.cos() * self.attention_scaling
859
+ sin = emb.sin() * self.attention_scaling
860
+
861
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
862
+
863
+
864
+ class InternS1ProMoeTextFourierEmbedding(Qwen3VLMoeTextRotaryEmbedding):
865
+ def __init__(self, config: InternS1ProConfig, device=None):
866
+ super().__init__(config=config)
867
+ self.num_inv_freq = config.rope_scaling.get("num_inv_freq", None)
868
+ self.fope_sep_head = config.rope_scaling.get("fope_sep_head", None)
869
+ self.fope_init_factor = config.rope_scaling.get("fope_init_factor", None)
870
+ if self.num_inv_freq is not None:
871
+ assert (self.inv_freq > (2.0 * torch.pi / config.max_position_embeddings)).all() or (self.inv_freq.shape[-1] == self.num_inv_freq), "FoPE is wrongly initialized."
872
+
873
+ self.head_dim = getattr(self.config, "head_dim", None) or config.hidden_size // config.num_attention_heads
874
+ self.input_dim = self.inv_freq.shape[-1]
875
+ self.output_dim = self.inv_freq.shape[-1]
876
+
877
+ if self.fope_sep_head:
878
+ sin_coef = torch.randn(self.config.num_key_value_heads, self.input_dim, self.output_dim).to(self.inv_freq.device)
879
+ cos_coef = torch.randn(self.config.num_key_value_heads, self.input_dim, self.output_dim).to(self.inv_freq.device)
880
+ else:
881
+ sin_coef = torch.randn(self.input_dim, self.output_dim).to(self.inv_freq.device)
882
+ cos_coef = torch.randn(self.input_dim, self.output_dim).to(self.inv_freq.device)
883
+
884
+ torch.nn.init.xavier_normal_(sin_coef, gain=self.fope_init_factor)
885
+ torch.nn.init.xavier_normal_(cos_coef, gain=self.fope_init_factor)
886
+
887
+ if self.input_dim == self.output_dim:
888
+ sin_coef += torch.eye(self.input_dim, device=sin_coef.device)
889
+ cos_coef += torch.eye(self.input_dim, device=cos_coef.device)
890
+ else:
891
+ sin_coef += self.get_step_eye(sin_coef)
892
+ cos_coef += self.get_step_eye(cos_coef)
893
+
894
+ self.register_buffer("sin_coef", sin_coef, persistent=True)
895
+ self.register_buffer("cos_coef", cos_coef, persistent=True)
896
+
897
+ def get_step_eye(self, _param):
898
+ import math
899
+
900
+ _step_eye = torch.zeros_like(_param)
901
+
902
+ step = math.ceil(self.input_dim / self.output_dim)
903
+ for i in range(self.output_dim):
904
+ if i*step < self.input_dim:
905
+ _step_eye[..., i*step, i] = 1.0
906
+
907
+ return _step_eye
908
+
909
+ def forward(self, x, position_ids):
910
+ if "dynamic" in self.rope_type:
911
+ raise NotImplementedError
912
+ self._dynamic_frequency_update(position_ids, device=x.device)
913
+
914
+ # Core RoPE block
915
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
916
+ position_ids_expanded = position_ids[:, None, :].float()
917
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
918
+ device_type = x.device.type
919
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
920
+ batch_size, seq_len, hidden_size = x.shape
921
+ with torch.autocast(device_type=device_type, enabled=False):
922
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
923
+ if self.fope_sep_head:
924
+ pos_cos = freqs.cos().unsqueeze(1).expand(batch_size, self.config.num_key_value_heads, seq_len, -1)
925
+ pos_sin = freqs.sin().unsqueeze(1).expand(batch_size, self.config.num_key_value_heads, seq_len, -1)
926
+ else:
927
+ pos_cos = freqs.cos()
928
+ pos_sin = freqs.sin()
929
+
930
+ if self.fope_sep_head:
931
+ sin = torch.einsum("bhtD, hDd -> bhtd", pos_sin, self.sin_coef.float())
932
+ cos = torch.einsum("bhtD, hDd -> bhtd", pos_cos, self.cos_coef.float())
933
+ else:
934
+ sin = torch.einsum("btD, Dd -> btd", pos_sin, self.sin_coef.float())
935
+ cos = torch.einsum("btD, Dd -> btd", pos_cos, self.cos_coef.float())
936
+
937
+ sin = F.pad(input=sin, pad=(0, self.head_dim // 2 - sin.size(-1)), mode="constant", value=1)
938
+ cos = F.pad(input=cos, pad=(0, self.head_dim // 2 - cos.size(-1)), mode="constant", value=1)
939
+
940
+ sin = torch.cat((sin, sin), dim=-1)
941
+ cos = torch.cat((cos, cos), dim=-1)
942
+
943
+ cos = cos * self.attention_scaling
944
+ sin = sin * self.attention_scaling
945
+
946
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
947
+
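+ # FoPE sketch: instead of passing cos/sin of the rotary frequencies straight to attention,
+ # the Fourier position embedding mixes them through the sin_coef / cos_coef matrices stored
+ # as buffers (optionally one set per KV head via fope_sep_head), pads the mixed features up
+ # to head_dim // 2 with a constant 1, duplicates the half-dim pattern to the full head_dim,
+ # and applies attention_scaling, mirroring the layout consumed by apply_rotary_pos_emb(_sep).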
948
+
949
+ @auto_docstring(
950
+ custom_intro=(
951
+ "Text part of Qwen3VLMoe with 1D FoPE (Fourier Position Embedding)."
952
+ )
953
+ )
954
+ class InternS1ProTextModel(InternS1ProPreTrainedModel):
955
+ config: InternS1ProTextConfig
956
+ _no_split_modules = ["InternS1ProMoeTextDecoderLayer"]
957
+
958
+ def __init__(self, config: InternS1ProTextConfig):
959
+ super().__init__(config)
960
+ # Check if FoPE is enabled and use appropriate rotary embedding
961
+ self.use_fope = False
962
+ if config.rope_scaling is not None:
963
+ self.use_fope = (
964
+ config.rope_scaling.get("fope_init_factor", None) is not None
965
+ or config.rope_scaling.get("fope_sep_head", None) is not None
966
+ or config.rope_scaling.get("num_inv_freq", None) is not None
967
+ )
968
+
969
+ if self.use_fope:
970
+ with torch.device("cpu"):
971
+ self.rotary_emb = InternS1ProMoeTextFourierEmbedding(config=config)
972
+ else:
973
+ self.rotary_emb = Qwen3VLMoeTextRotaryEmbedding(config=config)
974
+
975
+ self.padding_idx = config.pad_token_id
976
+ self.vocab_size = config.vocab_size
977
+
978
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
979
+ self.layers = nn.ModuleList(
980
+ [InternS1ProMoeTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
981
+ )
982
+ self.norm = Qwen3VLMoeTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
983
+
984
+ self.gradient_checkpointing = False
985
+
986
+ # Initialize weights and apply final processing
987
+ self.post_init()
988
+
989
+ @check_model_inputs()
990
+ @auto_docstring
991
+ def forward(
992
+ self,
993
+ input_ids: Optional[torch.LongTensor] = None,
994
+ attention_mask: Optional[torch.Tensor] = None,
995
+ position_ids: Optional[torch.LongTensor] = None,
996
+ past_key_values: Optional[Cache] = None,
997
+ inputs_embeds: Optional[torch.FloatTensor] = None,
998
+ use_cache: Optional[bool] = None,
999
+ cache_position: Optional[torch.LongTensor] = None,
1000
+ **kwargs: Unpack[FlashAttentionKwargs],
1001
+ ) -> Union[tuple, BaseModelOutputWithPast]:
1002
+ r"""
1003
+ Args documentation for InternS1ProTextModel forward method.
1004
+ """
1005
+ if (input_ids is None) ^ (inputs_embeds is not None):
1006
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1007
+
1008
+ # torch.jit.trace() doesn't support cache objects in the output
1009
+ if use_cache and past_key_values is None and not torch.jit.is_tracing():
1010
+ past_key_values = DynamicCache(config=self.config)
1011
+
1012
+ if inputs_embeds is None:
1013
+ inputs_embeds = self.embed_tokens(input_ids)
1014
+
1015
+ if cache_position is None:
1016
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1017
+ cache_position = torch.arange(
1018
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
1019
+ )
1020
+
1021
+ if position_ids is None:
1022
+ batch_size = inputs_embeds.shape[0]
1023
+ position_ids = cache_position.unsqueeze(0).expand(batch_size, -1)
1024
+
1025
+ attention_mask = create_causal_mask(
1026
+ config=self.config,
1027
+ input_embeds=inputs_embeds,
1028
+ attention_mask=attention_mask,
1029
+ cache_position=cache_position,
1030
+ past_key_values=past_key_values,
1031
+ position_ids=position_ids,
1032
+ )
1033
+
1034
+ hidden_states = inputs_embeds
1035
+
1036
+ # create position embeddings to be shared across the decoder layers
1037
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
1038
+
1039
+ # decoder layers
1040
+ for layer_idx, decoder_layer in enumerate(self.layers):
1041
+ layer_outputs = decoder_layer(
1042
+ hidden_states,
1043
+ attention_mask=attention_mask,
1044
+ position_ids=position_ids,
1045
+ past_key_values=past_key_values,
1046
+ cache_position=cache_position,
1047
+ position_embeddings=position_embeddings,
1048
+ **kwargs,
1049
+ )
1050
+ hidden_states = layer_outputs
1051
+
1052
+ hidden_states = self.norm(hidden_states)
1053
+
1054
+ return BaseModelOutputWithPast(
1055
+ last_hidden_state=hidden_states,
1056
+ past_key_values=past_key_values,
1057
+ )
1058
+
1059
+
1060
+ @dataclass
1061
+ @auto_docstring(
1062
+ custom_intro="""
1063
+ Base class for Qwen3VLMoe causal language model (or autoregressive) outputs.
1064
+ """
1065
+ )
1066
+ class Qwen3VLMoeCausalLMOutputWithPast(ModelOutput):
1067
+ r"""
1068
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
1069
+ Language modeling loss (for next-token prediction).
1070
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
1071
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
1072
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1073
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
1074
+
1075
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
1076
+ `past_key_values` input) to speed up sequential decoding.
1077
+ """
1078
+
1079
+ loss: Optional[torch.FloatTensor] = None
1080
+ logits: Optional[torch.FloatTensor] = None
1081
+ past_key_values: Optional[Cache] = None
1082
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
1083
+ attentions: Optional[tuple[torch.FloatTensor]] = None
1084
+ aux_loss: Optional[torch.FloatTensor] = None
1085
+
1086
+
1087
+ @dataclass
1088
+ @auto_docstring(
1089
+ custom_intro="""
1090
+ Base class for Llava outputs, with hidden states and attentions.
1091
+ """
1092
+ )
1093
+ class Qwen3VLMoeModelOutputWithPast(ModelOutput):
1094
+ r"""
1095
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
1096
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
1097
+
1098
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
1099
+ `past_key_values` input) to speed up sequential decoding.
1100
+ router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.output_router_logits=True`):
1101
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
1102
+
1103
+ Raw router logits computed by the MoE routers; these are used to compute the auxiliary
1104
+ loss for Mixture of Experts models.
1105
+ """
1106
+
1107
+ last_hidden_state: Optional[torch.FloatTensor] = None
1108
+ past_key_values: Optional[Cache] = None
1109
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
1110
+ attentions: Optional[tuple[torch.FloatTensor]] = None
1111
+ router_logits: Optional[tuple[torch.FloatTensor]] = None
1112
+
1113
+
1114
+ @auto_docstring
1115
+ class InternS1ProModel(InternS1ProPreTrainedModel):
1116
+ base_model_prefix = ""
1117
+ _checkpoint_conversion_mapping = {}
1118
+ # Reference: fix gemma3 grad acc #37208
1119
+ accepts_loss_kwargs = False
1120
+ config: InternS1ProConfig
1121
+ _no_split_modules = ["InternS1ProMoeTextDecoderLayer", "Qwen3VLMoeVisionBlock"]
1122
+
1123
+ def __init__(self, config):
1124
+ super().__init__(config)
1125
+ self.visual = InternS1ProVisionModel._from_config(config.vision_config)
1126
+ self.language_model = InternS1ProTextModel._from_config(config.text_config)
1127
+
1128
+ # Initialize weights and apply final processing
1129
+ self.post_init()
1130
+
1131
+ def get_input_embeddings(self):
1132
+ return self.language_model.get_input_embeddings()
1133
+
1134
+ def set_input_embeddings(self, value):
1135
+ self.language_model.set_input_embeddings(value)
1136
+
1137
+ def set_decoder(self, decoder):
1138
+ self.language_model = decoder
1139
+
1140
+ def get_decoder(self):
1141
+ return self.language_model
1142
+
1143
+ def get_video_features(
1144
+ self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
1145
+ ):
1146
+ """
1147
+ Encodes videos into continuous embeddings that can be forwarded to the language model.
1148
+
1149
+ Args:
1150
+ pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1151
+ The tensors corresponding to the input videos.
1152
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1153
+ The temporal, height and width of feature shape of each video in LLM.
1154
+ """
1155
+ # Same implementation as for images
1156
+ return self.get_image_features(pixel_values_videos, video_grid_thw)
1157
+
1158
+ def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
1159
+ """
1160
+ Encodes images into continuous embeddings that can be forwarded to the language model.
1161
+
1162
+ Args:
1163
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
1164
+ The tensors corresponding to the input images.
1165
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1166
+ The temporal, height and width of feature shape of each image in LLM.
1167
+ """
1168
+ pixel_values = pixel_values.type(self.visual.dtype)
1169
+ image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
1170
+ split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
1171
+ image_embeds = torch.split(image_embeds, split_sizes)
1172
+ return image_embeds
1173
+
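For reference, the per-image split sizes used above come straight from the grids: after spatial merging, each image contributes `t * h * w // spatial_merge_size**2` embeddings. A minimal sketch with made-up grid values (not this checkpoint's actual configuration):

```python
# Illustrative only: how `split_sizes` is derived from `image_grid_thw`.
# Grid values and merge size are invented for the example.
import torch

image_grid_thw = torch.tensor([[1, 28, 28],   # image 1: 1 temporal x 28 x 28 spatial patches
                               [1, 16, 32]])  # image 2
spatial_merge_size = 2                        # 2x2 patch groups merged into one visual token

split_sizes = (image_grid_thw.prod(-1) // spatial_merge_size**2).tolist()
print(split_sizes)  # [196, 128] -> number of embeddings each image contributes
```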
1174
+ def get_placeholder_mask(
1175
+ self,
1176
+ input_ids: torch.LongTensor,
1177
+ inputs_embeds: torch.FloatTensor,
1178
+ image_features: Optional[torch.FloatTensor] = None,
1179
+ video_features: Optional[torch.FloatTensor] = None,
1180
+ ):
1181
+ """
1182
+ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
1183
+ equal to the length of multimodal features. If the lengths are different, an error is raised.
1184
+ """
1185
+ if input_ids is None:
1186
+ special_image_mask = inputs_embeds == self.get_input_embeddings()(
1187
+ torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
1188
+ )
1189
+ special_image_mask = special_image_mask.all(-1)
1190
+ special_video_mask = inputs_embeds == self.get_input_embeddings()(
1191
+ torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
1192
+ )
1193
+ special_video_mask = special_video_mask.all(-1)
1194
+ else:
1195
+ special_image_mask = input_ids == self.config.image_token_id
1196
+ special_video_mask = input_ids == self.config.video_token_id
1197
+
1198
+ n_image_tokens = special_image_mask.sum()
1199
+ special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
1200
+ if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
1201
+ raise ValueError(
1202
+ f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
1203
+ )
1204
+
1205
+ n_video_tokens = special_video_mask.sum()
1206
+ special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
1207
+ if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
1208
+ raise ValueError(
1209
+ f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
1210
+ )
1211
+
1212
+ return special_image_mask, special_video_mask
1213
+
1214
+ @auto_docstring
1215
+ @check_model_inputs()
1216
+ def forward(
1217
+ self,
1218
+ input_ids: torch.LongTensor = None,
1219
+ attention_mask: Optional[torch.Tensor] = None,
1220
+ position_ids: Optional[torch.LongTensor] = None,
1221
+ past_key_values: Optional[Cache] = None,
1222
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1223
+ pixel_values: Optional[torch.Tensor] = None,
1224
+ pixel_values_videos: Optional[torch.FloatTensor] = None,
1225
+ image_grid_thw: Optional[torch.LongTensor] = None,
1226
+ video_grid_thw: Optional[torch.LongTensor] = None,
1227
+ cache_position: Optional[torch.LongTensor] = None,
1228
+ **kwargs: Unpack[TransformersKwargs],
1229
+ ) -> Union[tuple, Qwen3VLMoeModelOutputWithPast]:
1230
+ r"""
1231
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1232
+ The temporal, height and width of feature shape of each image in LLM.
1233
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1234
+ The temporal, height and width of feature shape of each video in LLM.
1235
+ """
1236
+ if (input_ids is None) ^ (inputs_embeds is not None):
1237
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
1238
+
1239
+ if inputs_embeds is None:
1240
+ inputs_embeds = self.get_input_embeddings()(input_ids)
1241
+
1242
+ image_mask = None
1243
+ video_mask = None
1244
+
1245
+ if pixel_values is not None:
1246
+ image_embeds = self.get_image_features(pixel_values, image_grid_thw)
1247
+ image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1248
+ image_mask, _ = self.get_placeholder_mask(
1249
+ input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
1250
+ )
1251
+ inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
1252
+
1253
+ if pixel_values_videos is not None:
1254
+ video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
1255
+ video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
1256
+ _, video_mask = self.get_placeholder_mask(
1257
+ input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
1258
+ )
1259
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
1260
+
1261
+ if position_ids is None:
1262
+ batch_size, seq_length = inputs_embeds.shape[:2]
1263
+ if cache_position is not None:
1264
+ position_ids = cache_position.unsqueeze(0).expand(batch_size, -1)
1265
+ else:
1266
+ position_ids = torch.arange(seq_length, device=inputs_embeds.device).unsqueeze(0).expand(batch_size, -1)
1267
+
1268
+ outputs = self.language_model(
1269
+ input_ids=None,
1270
+ position_ids=position_ids,
1271
+ attention_mask=attention_mask,
1272
+ past_key_values=past_key_values,
1273
+ inputs_embeds=inputs_embeds,
1274
+ cache_position=cache_position,
1275
+ **kwargs,
1276
+ )
1277
+
1278
+ return Qwen3VLMoeModelOutputWithPast(
1279
+ last_hidden_state=outputs.last_hidden_state,
1280
+ past_key_values=outputs.past_key_values,
1281
+ )
1282
+
1283
+
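The `masked_scatter` calls in `forward` above replace the placeholder-token embeddings with the visual embeddings, in order and one position at a time. A toy sketch of that mechanic (token ids and sizes are invented for illustration):

```python
# Toy demonstration of the masked_scatter step; ids and dimensions are arbitrary.
import torch

hidden_size = 4
inputs_embeds = torch.zeros(1, 6, hidden_size)     # 6 positions of text embeddings
image_token_id = 99                                # hypothetical placeholder id
input_ids = torch.tensor([[1, 99, 99, 99, 2, 3]])  # three image placeholders

image_embeds = torch.ones(3, hidden_size)          # one visual embedding per placeholder

mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(mask, image_embeds)
print(inputs_embeds[0, :, 0])  # tensor([0., 1., 1., 1., 0., 0.])
```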
1284
+ def load_balancing_loss_func(
1285
+ gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
1286
+ num_experts: Optional[int] = None,
1287
+ top_k=2,
1288
+ attention_mask: Optional[torch.Tensor] = None,
1289
+ ) -> Union[torch.Tensor, int]:
1290
+ r"""
1291
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
1292
+
1293
+ See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
1294
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
1295
+ experts is too unbalanced.
1296
+
1297
+ Args:
1298
+ gate_logits:
1299
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
1300
+ shape [batch_size X sequence_length, num_experts].
1301
+ num_experts:
1302
+ Number of experts
1303
+ top_k:
1304
+ The number of experts to route per-token; can also be interpreted as the `top-k` routing
1305
+ parameter.
1306
+ attention_mask (`torch.Tensor`, *optional*):
1307
+ The attention_mask used in forward function
1308
+ shape [batch_size X sequence_length] if not None.
1309
+
1310
+ Returns:
1311
+ The auxiliary loss.
1312
+ """
1313
+ if gate_logits is None or not isinstance(gate_logits, tuple):
1314
+ return 0
1315
+
1316
+ if isinstance(gate_logits, tuple):
1317
+ compute_device = gate_logits[0].device
1318
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
1319
+
1320
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
1321
+
1322
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
1323
+
1324
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
1325
+
1326
+ if attention_mask is None:
1327
+ # Compute the percentage of tokens routed to each expert
1328
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
1329
+
1330
+ # Compute the average probability of routing to these experts
1331
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
1332
+ else:
1333
+ batch_size, sequence_length = attention_mask.shape
1334
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
1335
+
1336
+ # Compute the mask that masks all padding tokens as 0 with the same shape as expert_mask
1337
+ expert_attention_mask = (
1338
+ attention_mask[None, :, :, None, None]
1339
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
1340
+ .reshape(-1, top_k, num_experts)
1341
+ .to(compute_device)
1342
+ )
1343
+
1344
+ # Compute the percentage of tokens routed to each expert
1345
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
1346
+ expert_attention_mask, dim=0
1347
+ )
1348
+
1349
+ # Compute the mask that masks all padding tokens as 0 with the same shape as tokens_per_expert
1350
+ router_per_expert_attention_mask = (
1351
+ attention_mask[None, :, :, None]
1352
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
1353
+ .reshape(-1, num_experts)
1354
+ .to(compute_device)
1355
+ )
1356
+
1357
+ # Compute the average probability of routing to these experts
1358
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
1359
+ router_per_expert_attention_mask, dim=0
1360
+ )
1361
+
1362
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
1363
+ return overall_loss * num_experts
1364
+
1365
+
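As a quick sanity check of `load_balancing_loss_func` above: with `E` experts the loss equals `E * sum(tokens_per_expert * router_prob_per_expert)`, which is minimized at `top_k` when routing is perfectly uniform and grows as routing collapses onto a few experts. The snippet below (illustrative shapes, run inside this module or after importing the function) exercises it on random logits:

```python
# Sanity check on random router logits; shapes are illustrative.
import torch

num_layers, num_tokens, num_experts, top_k = 2, 64, 8, 2
gate_logits = tuple(torch.randn(num_tokens, num_experts) for _ in range(num_layers))

aux = load_balancing_loss_func(gate_logits, num_experts=num_experts, top_k=top_k)
print(aux)  # scalar tensor; close to top_k (2.0) for near-uniform routing, larger when unbalanced
```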
1366
+ class InternS1ProForConditionalGeneration(InternS1ProPreTrainedModel, GenerationMixin):
1367
+ _checkpoint_conversion_mapping = {}
1368
+ # Reference: fix gemma3 grad acc #37208
1369
+ accepts_loss_kwargs = False
1370
+ config: InternS1ProConfig
1371
+
1372
+ def __init__(self, config):
1373
+ super().__init__(config)
1374
+ self.model = InternS1ProModel(config)
1375
+ self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
1376
+
1377
+ self.post_init()
1378
+
1379
+ def get_input_embeddings(self):
1380
+ return self.model.get_input_embeddings()
1381
+
1382
+ def set_input_embeddings(self, value):
1383
+ self.model.set_input_embeddings(value)
1384
+
1385
+ def set_decoder(self, decoder):
1386
+ self.model.set_decoder(decoder)
1387
+
1388
+ def get_decoder(self):
1389
+ return self.model.get_decoder()
1390
+
1391
+ def get_video_features(
1392
+ self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
1393
+ ):
1394
+ return self.model.get_video_features(pixel_values_videos, video_grid_thw)
1395
+
1396
+ def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
1397
+ return self.model.get_image_features(pixel_values, image_grid_thw)
1398
+
1399
+ # Make modules available through conditional class for BC
1400
+ @property
1401
+ def language_model(self):
1402
+ return self.model.language_model
1403
+
1404
+ @property
1405
+ def visual(self):
1406
+ return self.model.visual
1407
+
1408
+ @check_model_inputs()
1409
+ def forward(
1410
+ self,
1411
+ input_ids: torch.LongTensor = None,
1412
+ attention_mask: Optional[torch.Tensor] = None,
1413
+ position_ids: Optional[torch.LongTensor] = None,
1414
+ past_key_values: Optional[Cache] = None,
1415
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1416
+ labels: Optional[torch.LongTensor] = None,
1417
+ pixel_values: Optional[torch.Tensor] = None,
1418
+ pixel_values_videos: Optional[torch.FloatTensor] = None,
1419
+ image_grid_thw: Optional[torch.LongTensor] = None,
1420
+ video_grid_thw: Optional[torch.LongTensor] = None,
1421
+ cache_position: Optional[torch.LongTensor] = None,
1422
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1423
+ **kwargs: Unpack[TransformersKwargs],
1424
+ ) -> Union[tuple, Qwen3VLMoeCausalLMOutputWithPast]:
1425
+ r"""
1426
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1427
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1428
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1429
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1430
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
1431
+ The temporal, height and width of feature shape of each image in LLM.
1432
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
1433
+ The temporal, height and width of feature shape of each video in LLM.
1434
+
1435
+ Example:
1436
+ ```python
1437
+ >>> from PIL import Image
1438
+ >>> import requests
1439
+ >>> from transformers import AutoProcessor, Qwen3VLMoeForConditionalGeneration
1440
+
1441
+ >>> model = Qwen3VLMoeForConditionalGeneration.from_pretrained("Qwen/Qwen3-VL-30B-A3B-Instruct", dtype="auto", device_map="auto")
1442
+ >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-30B-A3B-Instruct")
1443
+
1444
+ >>> messages = [
1445
+ {
1446
+ "role": "user",
1447
+ "content": [
1448
+ {
1449
+ "type": "image",
1450
+ "image": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg",
1451
+ },
1452
+ {"type": "text", "text": "Describe this image in short."},
1453
+ ],
1454
+ }
1455
+ ]
1456
+
1457
+ >>> # Preparation for inference
1458
+ >>> inputs = processor.apply_chat_template(
1459
+ messages,
1460
+ tokenize=True,
1461
+ add_generation_prompt=True,
1462
+ return_dict=True,
1463
+ return_tensors="pt"
1464
+ )
1465
+ >>> inputs = inputs.to(model.device)
1466
+
1467
+ >>> # Generate
1468
+ >>> generated_ids = model.generate(**inputs, max_new_tokens=128)
1469
+ >>> generated_ids_trimmed = [
1470
+ out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
1471
+ ]
1472
+ >>> processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1473
+ "A woman in a plaid shirt sits on a sandy beach at sunset, smiling as she gives a high-five to a yellow Labrador Retriever wearing a harness. The ocean waves roll in the background."
1474
+ ```"""
1475
+
1476
+ outputs = self.model(
1477
+ input_ids=input_ids,
1478
+ pixel_values=pixel_values,
1479
+ pixel_values_videos=pixel_values_videos,
1480
+ image_grid_thw=image_grid_thw,
1481
+ video_grid_thw=video_grid_thw,
1482
+ position_ids=position_ids,
1483
+ attention_mask=attention_mask,
1484
+ past_key_values=past_key_values,
1485
+ inputs_embeds=inputs_embeds,
1486
+ cache_position=cache_position,
1487
+ **kwargs,
1488
+ )
1489
+
1490
+ hidden_states = outputs[0]
1491
+
1492
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1493
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
1494
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
1495
+
1496
+ loss = None
1497
+ if labels is not None:
1498
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)
1499
+
1500
+ aux_loss = None
1501
+ if kwargs.get("output_router_logits", False):
1502
+ aux_loss = load_balancing_loss_func(
1503
+ outputs.router_logits,
1504
+ self.config.text_config.num_experts,
1505
+ self.config.text_config.num_experts_per_tok,
1506
+ attention_mask,
1507
+ )
1508
+ if labels is not None:
1509
+ loss += self.config.text_config.router_aux_loss_coef * aux_loss.to(
1510
+ loss.device
1511
+ ) # make sure to reside in the same device
1512
+
1513
+ return Qwen3VLMoeCausalLMOutputWithPast(
1514
+ loss=loss,
1515
+ aux_loss=aux_loss,
1516
+ logits=logits,
1517
+ past_key_values=outputs.past_key_values,
1518
+ )
1519
+
1520
+ def prepare_inputs_for_generation(
1521
+ self,
1522
+ input_ids,
1523
+ past_key_values=None,
1524
+ attention_mask=None,
1525
+ inputs_embeds=None,
1526
+ cache_position=None,
1527
+ position_ids=None,
1528
+ use_cache=True,
1529
+ pixel_values=None,
1530
+ pixel_values_videos=None,
1531
+ image_grid_thw=None,
1532
+ video_grid_thw=None,
1533
+ **kwargs,
1534
+ ):
1535
+ # Overwritten -- in specific circumstances we don't want to forward image inputs to the model
1536
+
1537
+ model_inputs = super().prepare_inputs_for_generation(
1538
+ input_ids,
1539
+ past_key_values=past_key_values,
1540
+ attention_mask=attention_mask,
1541
+ inputs_embeds=inputs_embeds,
1542
+ cache_position=cache_position,
1543
+ position_ids=position_ids,
1544
+ pixel_values=pixel_values,
1545
+ pixel_values_videos=pixel_values_videos,
1546
+ image_grid_thw=image_grid_thw,
1547
+ video_grid_thw=video_grid_thw,
1548
+ use_cache=use_cache,
1549
+ **kwargs,
1550
+ )
1551
+
1552
+ model_inputs["position_ids"] = None
1553
+
1554
+ if cache_position[0] != 0:
1555
+ model_inputs["pixel_values"] = None
1556
+ model_inputs["pixel_values_videos"] = None
1557
+
1558
+ return model_inputs
1559
+
1560
+ def _get_image_nums_and_video_nums(
1561
+ self,
1562
+ input_ids: Optional[torch.LongTensor],
1563
+ inputs_embeds: Optional[torch.Tensor] = None,
1564
+ ) -> tuple[torch.Tensor, torch.Tensor]:
1565
+ """
1566
+ Get the number of images and videos for each sample to calculate the separation length of the sample tensor.
1567
+ These parameters are not passed through the processor to avoid unpredictable impacts from interface modifications.
1568
+
1569
+ Args:
1570
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
1571
+ Indices of input sequence tokens in the vocabulary.
1572
+
1573
+ Returns:
1574
+ image_nums (`torch.LongTensor` of shape `(batch_size, num_images_sample)`)
1575
+ video_nums (`torch.LongTensor` of shape `(batch_size, num_videos_sample)`)
1576
+ """
1577
+ image_token_id = self.config.image_token_id
1578
+ video_token_id = self.config.video_token_id
1579
+ vision_start_token_id = self.config.vision_start_token_id
1580
+
1581
+ if inputs_embeds is not None:
1582
+ vision_start_mask = (
1583
+ inputs_embeds
1584
+ == self.get_input_embeddings()(
1585
+ torch.tensor(vision_start_token_id, dtype=torch.long, device=inputs_embeds.device)
1586
+ )
1587
+ )[..., 0]
1588
+ image_mask = (
1589
+ inputs_embeds
1590
+ == self.get_input_embeddings()(
1591
+ torch.tensor(image_token_id, dtype=torch.long, device=inputs_embeds.device)
1592
+ )
1593
+ )[..., 0]
1594
+ video_mask = (
1595
+ inputs_embeds
1596
+ == self.get_input_embeddings()(
1597
+ torch.tensor(video_token_id, dtype=torch.long, device=inputs_embeds.device)
1598
+ )
1599
+ )[..., 0]
1600
+ else:
1601
+ vision_start_mask = input_ids == vision_start_token_id
1602
+ image_mask = input_ids == image_token_id
1603
+ video_mask = input_ids == video_token_id
1604
+
1605
+ vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1)
1606
+ image_nums = torch.sum(vision_first_mask & image_mask, dim=1)
1607
+ video_nums = torch.sum(vision_first_mask & video_mask, dim=1)
1608
+
1609
+ return image_nums, video_nums
1610
+
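The counting above relies on the convention that each image/video placeholder run is preceded by the vision-start token; rolling the start mask right by one position aligns it with the first placeholder token of every run. A toy example (token ids are made up):

```python
# Toy illustration of counting image segments via the rolled vision-start mask; ids are invented.
import torch

vision_start_token_id, image_token_id = 10, 11
input_ids = torch.tensor([[1, 10, 11, 11, 2, 10, 11, 3]])  # two image segments in one sample

vision_start_mask = input_ids == vision_start_token_id
image_mask = input_ids == image_token_id

vision_first_mask = torch.roll(vision_start_mask, shifts=1, dims=1)
image_nums = torch.sum(vision_first_mask & image_mask, dim=1)
print(image_nums)  # tensor([2])
```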
1611
+ def _expand_inputs_for_generation(
1612
+ self,
1613
+ expand_size: int = 1,
1614
+ is_encoder_decoder: bool = False,
1615
+ input_ids: Optional[torch.LongTensor] = None,
1616
+ **model_kwargs,
1617
+ ) -> tuple[torch.LongTensor, dict[str, Any]]:
1618
+ # Overwritten -- Support for expanding tensors without a batch size dimension
1619
+ # e.g., pixel_values, image_grid_thw, pixel_values_videos, video_grid_thw, second_per_grid_t
1620
+ # pixel_values.shape[0] is sum(seqlen_images for samples)
1621
+ # image_grid_thw.shape[0] is sum(num_images for samples)
1622
+
1623
+ if expand_size == 1:
1624
+ return input_ids, model_kwargs
1625
+
1626
+ visual_keys = ["pixel_values", "image_grid_thw", "pixel_values_videos", "video_grid_thw", "second_per_grid_ts"]
1627
+
1628
+ def _expand_dict_for_generation_visual(dict_to_expand):
1629
+ image_grid_thw = model_kwargs.get("image_grid_thw", None)
1630
+ video_grid_thw = model_kwargs.get("video_grid_thw", None)
1631
+ image_nums, video_nums = self._get_image_nums_and_video_nums(
1632
+ input_ids, inputs_embeds=model_kwargs.get("inputs_embeds", None)
1633
+ )
1634
+
1635
+ def _repeat_interleave_samples(x, lengths, repeat_times):
1636
+ samples = torch.split(x, lengths)
1637
+ repeat_args = [repeat_times] + [1] * (x.dim() - 1)
1638
+ result = torch.cat([sample.repeat(*repeat_args) for sample in samples], dim=0)
1639
+ return result
1640
+
1641
+ for key in dict_to_expand:
1642
+ if key == "pixel_values":
1643
+ # split images into samples
1644
+ samples = torch.split(image_grid_thw, list(image_nums))
1645
+ # compute the sequence length of images for each sample
1646
+ lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
1647
+ dict_to_expand[key] = _repeat_interleave_samples(
1648
+ dict_to_expand[key], lengths=lengths, repeat_times=expand_size
1649
+ )
1650
+ elif key == "image_grid_thw":
1651
+ # get the num of images for each sample
1652
+ lengths = list(image_nums)
1653
+ dict_to_expand[key] = _repeat_interleave_samples(
1654
+ dict_to_expand[key], lengths=lengths, repeat_times=expand_size
1655
+ )
1656
+ elif key == "pixel_values_videos":
1657
+ samples = torch.split(video_grid_thw, list(video_nums))
1658
+ lengths = [torch.prod(sample, dim=1).sum() for sample in samples]
1659
+ dict_to_expand[key] = _repeat_interleave_samples(
1660
+ dict_to_expand[key], lengths=lengths, repeat_times=expand_size
1661
+ )
1662
+ elif key == "video_grid_thw":
1663
+ lengths = list(video_nums)
1664
+ dict_to_expand[key] = _repeat_interleave_samples(
1665
+ dict_to_expand[key], lengths=lengths, repeat_times=expand_size
1666
+ )
1667
+ elif key == "second_per_grid_ts":
1668
+ dict_to_expand[key] = _repeat_interleave_samples(
1669
+ dict_to_expand[key], lengths=list(video_nums), repeat_times=expand_size
1670
+ )
1671
+ return dict_to_expand
1672
+
1673
+ def _expand_dict_for_generation(dict_to_expand):
1674
+ for key in dict_to_expand:
1675
+ if (
1676
+ key != "cache_position"
1677
+ and dict_to_expand[key] is not None
1678
+ and isinstance(dict_to_expand[key], torch.Tensor)
1679
+ and key not in visual_keys
1680
+ ):
1681
+ dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0)
1682
+ return dict_to_expand
1683
+
1684
+ model_kwargs = _expand_dict_for_generation_visual(model_kwargs)
1685
+
1686
+ if input_ids is not None:
1687
+ input_ids = input_ids.repeat_interleave(expand_size, dim=0)
1688
+
1689
+ model_kwargs = _expand_dict_for_generation(model_kwargs)
1690
+
1691
+ if is_encoder_decoder:
1692
+ if model_kwargs.get("encoder_outputs") is None:
1693
+ raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
1694
+ model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
1695
+
1696
+ return input_ids, model_kwargs
1697
+
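Because `pixel_values` and the grid tensors carry no batch dimension, expansion for `num_return_sequences` or beam search repeats each sample's rows in place rather than calling `repeat_interleave` on dim 0. A sketch of the helper's effect with made-up lengths:

```python
# Illustrative behavior of _repeat_interleave_samples with invented per-sample lengths.
import torch

x = torch.tensor([0, 0, 1, 1, 1])     # rows of two samples concatenated: lengths [2, 3]
samples = torch.split(x, [2, 3])
expanded = torch.cat([s.repeat(2) for s in samples], dim=0)  # expand_size = 2
print(expanded)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 1, 1]) -> each sample duplicated in place
```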
1698
+ __all__ = [
1699
+ "InternS1ProVisionModel",
1700
+ "InternS1ProForConditionalGeneration",
1701
+ "InternS1ProModel",
1702
+ "InternS1ProPreTrainedModel",
1703
+ ]
modeling_rope_utils.py ADDED
@@ -0,0 +1,885 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from functools import wraps
17
+ from typing import Optional
18
+
19
+ from transformers.configuration_utils import PretrainedConfig
20
+ from transformers.utils import is_torch_available, logging
21
+ # copy from site-packages/transformers/modeling_rope_utils.py
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ if is_torch_available():
27
+ import torch
28
+
29
+
30
+ def dynamic_rope_update(rope_forward):
31
+ """
32
+ Decorator function to update the RoPE parameters in the forward pass, if the model is using a dynamic RoPE
33
+ (i.e. a RoPE implementation that may recompute its frequencies in the forward pass).
34
+
35
+ Args:
36
+ rope_forward (Callable):
37
+ The forward pass of the RoPE implementation.
38
+
39
+ Returns:
40
+ The decorated forward pass.
41
+ """
42
+
43
+ def longrope_frequency_update(self, position_ids, device):
44
+ """Longrope uses long factor if sequence is larger than original pretraining length, short otherwise."""
45
+ seq_len = torch.max(position_ids) + 1
46
+ if hasattr(self.config, "original_max_position_embeddings"):
47
+ original_max_position_embeddings = self.config.original_max_position_embeddings
48
+ else:
49
+ original_max_position_embeddings = self.config.max_position_embeddings
50
+ if seq_len > original_max_position_embeddings:
51
+ if not hasattr(self, "long_inv_freq"):
52
+ self.long_inv_freq, _ = self.rope_init_fn(
53
+ self.config, device, seq_len=original_max_position_embeddings + 1
54
+ )
55
+ self.register_buffer("inv_freq", self.long_inv_freq, persistent=False)
56
+ else:
57
+ # This .to() is needed if the model has been moved to a device after being initialized (because
58
+ # the buffer is automatically moved, but not the original copy)
59
+ self.original_inv_freq = self.original_inv_freq.to(device)
60
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
61
+
62
+ def dynamic_frequency_update(self, position_ids, device):
63
+ """
64
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
65
+ 1 - growing beyond the cached sequence length (allow scaling)
66
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
67
+ """
68
+ seq_len = torch.max(position_ids) + 1
69
+ if seq_len > self.max_seq_len_cached: # growth
70
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
71
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
72
+ self.max_seq_len_cached = seq_len
73
+
74
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
75
+ # This .to() is needed if the model has been moved to a device after being initialized (because
76
+ # the buffer is automatically moved, but not the original copy)
77
+ self.original_inv_freq = self.original_inv_freq.to(device)
78
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
79
+ self.max_seq_len_cached = self.original_max_seq_len
80
+
81
+ @wraps(rope_forward)
82
+ def wrapper(self, x, position_ids):
83
+ if "dynamic" in self.rope_type:
84
+ dynamic_frequency_update(self, position_ids, device=x.device)
85
+ elif self.rope_type == "longrope":
86
+ longrope_frequency_update(self, position_ids, device=x.device)
87
+ return rope_forward(self, x, position_ids)
88
+
89
+ return wrapper
90
+
91
+
92
+ def _compute_default_rope_parameters(
93
+ config: Optional[PretrainedConfig] = None,
94
+ device: Optional["torch.device"] = None,
95
+ seq_len: Optional[int] = None,
96
+ ) -> tuple["torch.Tensor", float]:
97
+ """
98
+ Computes the inverse frequencies according to the original RoPE implementation
99
+ Args:
100
+ config ([`~transformers.PretrainedConfig`]):
101
+ The model configuration. This function assumes that the config will provide at least the following
102
+ properties:
103
+
104
+ * rope_theta (`float`): The base wavelength from which the inverse frequencies will be derived.
105
+ * hidden_size (`int`): The numerator when deriving a head_dim, if not provided directly.
106
+ * num_attention_heads (`int`): The denominator when deriving a head_dim, if not provided directly.
107
+
108
+ Additionally, this function will make use of the following properties if they are found in the config:
109
+
110
+ * head_dim (`int`, *optional*): The size of the key-value heads in the model. If None, this value will be
111
+ derived as hidden_size // num_attention_heads.
112
+ * partial_rotary_factor (`float`, *optional*): If less than 1.0, inverse frequencies will be returned for
113
+ the first fraction of the head_dim. Defaults to 1.0.
114
+ device (`torch.device`):
115
+ The device to use for initialization of the inverse frequencies.
116
+ seq_len (`int`, *optional*):
117
+ The current sequence length. Unused for this type of RoPE.
118
+
119
+ Returns:
120
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
121
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
122
+ """
123
+ base = config.rope_theta
124
+ partial_rotary_factor = getattr(config, "partial_rotary_factor", 1.0)
125
+ head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
126
+ dim = int(head_dim * partial_rotary_factor)
127
+
128
+ attention_factor = 1.0 # Unused in this type of RoPE
129
+
130
+ # Compute the inverse frequencies
131
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim))
132
+
133
+ # Compute FoPE if specified
134
+ use_fope = config.rope_scaling is not None and (
135
+ config.rope_scaling.get("fope_init_factor", None) is not None \
136
+ or config.rope_scaling.get("fope_sep_heads", None) is not None \
137
+ or config.rope_scaling.get("num_inv_freq", None) is not None
138
+ ) and config.rope_scaling.get("type", config.rope_scaling.get("rope_type", None)) == "default"
139
+
140
+ if use_fope:
141
+ inv_freq, attention_factor = _compute_fope_parameters(config, device, seq_len, inv_freq, attention_factor)
142
+
143
+ return inv_freq, attention_factor
144
+
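The default inverse frequencies form the usual RoPE geometric progression, `inv_freq[i] = 1 / base**(2i / dim)`. A small numeric check with illustrative values (not this model's `rope_theta` or head size):

```python
# Numeric check of the default RoPE inverse-frequency formula; base and dim are illustrative.
import torch

base, dim = 10000.0, 8
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
print(inv_freq)  # values 1.0, 0.1, 0.01, 0.001 -- frequencies decay geometrically
```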
145
+
146
+ def _compute_linear_scaling_rope_parameters(
147
+ config: Optional[PretrainedConfig] = None,
148
+ device: Optional["torch.device"] = None,
149
+ seq_len: Optional[int] = None,
150
+ ) -> tuple["torch.Tensor", float]:
151
+ """
152
+ Computes the inverse frequencies with linear scaling. Credits to the Reddit user /u/kaiokendev
153
+ Args:
154
+ config ([`~transformers.PretrainedConfig`]):
155
+ The model configuration. This function assumes that the config will provide at least the following
156
+ properties:
157
+
158
+ * rope_theta (`float`): The base wavelength from which the inverse frequencies will be derived.
159
+ * hidden_size (`int`): The numerator when deriving a head_dim, if not provided directly.
160
+ * num_attention_heads (`int`): The denominator when deriving a head_dim, if not provided directly.
161
+
162
+ Additionally, this function will make use of the following properties if they are found in the config:
163
+
164
+ * head_dim (`int`, *optional*): The size of the key-value heads in the model. If None, this value will be
165
+ derived as hidden_size // num_attention_heads.
166
+ * partial_rotary_factor (`float`, *optional*): If less than 1.0, inverse frequencies will be returned for
167
+ the first fraction of the head_dim. Defaults to 1.0.
168
+ device (`torch.device`):
169
+ The device to use for initialization of the inverse frequencies.
170
+ seq_len (`int`, *optional*):
171
+ The current sequence length. Unused for this type of RoPE.
172
+
173
+ Returns:
174
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
175
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
176
+ """
177
+ factor = config.rope_scaling["factor"]
178
+
179
+ # Gets the default RoPE parameters
180
+ inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len)
181
+
182
+ # Then applies linear scaling to the frequencies.
183
+ # NOTE: originally, scaling was applied to the position_ids. However, we get `embs = inv_freq @ position_ids`, so
184
+ # applying scaling to the inverse frequencies is equivalent.
185
+ inv_freq /= factor
186
+
187
+
188
+ # Compute FoPE if specified
189
+ use_fope = (
190
+ config.rope_scaling.get("fope_init_factor", None) is not None \
191
+ or config.rope_scaling.get("fope_sep_heads", None) is not None \
192
+ or config.rope_scaling.get("num_inv_freq", None) is not None
193
+ )
194
+
195
+ if use_fope:
196
+ inv_freq, attention_factor = _compute_fope_parameters(config, device, seq_len, inv_freq, attention_factor)
197
+
198
+ return inv_freq, attention_factor
199
+
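Linear (position-interpolation) scaling divides the inverse frequencies by `factor`, which is numerically identical to dividing the position ids, as the note above points out. A quick check with toy numbers:

```python
# Scaling positions vs. scaling inverse frequencies gives identical rotation angles (toy numbers).
import torch

inv_freq = torch.tensor([1.0, 0.1, 0.01])
positions = torch.arange(8, dtype=torch.float)
factor = 4.0

angles_scaled_positions = torch.outer(positions / factor, inv_freq)
angles_scaled_freqs = torch.outer(positions, inv_freq / factor)
print(torch.allclose(angles_scaled_positions, angles_scaled_freqs))  # True
```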
200
+
201
+ def _compute_dynamic_ntk_parameters(
202
+ config: Optional[PretrainedConfig] = None,
203
+ device: Optional["torch.device"] = None,
204
+ seq_len: Optional[int] = None,
205
+ ) -> tuple["torch.Tensor", float]:
206
+ """
207
+ Computes the inverse frequencies with NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla
208
+
209
+ Args:
210
+ config ([`~transformers.PretrainedConfig`]):
211
+ The model configuration. This function assumes that the config will provide at least the following
212
+ properties:
213
+
214
+ * rope_theta (`float`): The base wavelength from which the inverse frequencies will be derived.
215
+ * hidden_size (`int`): The numerator when deriving a head_dim, if not provided directly.
216
+ * num_attention_heads (`int`): The denominator when deriving a head_dim, if not provided directly.
217
+ * max_position_embeddings (`int`): The default sequence length used to update the dynamic RoPE at
218
+ inference time
219
+ * rope_scaling (`dict[str, float]`): The standard RoPE scaling parameters, from which `factor`
220
+ will be accessed. The value of `factor` is used to determine the new base frequency, along with the
221
+ current sequence length (seq_len), the maximum positional embeddings (max_position_embeddings), and the
222
+ computed dimensionality (dim) of the rotary embeddings. If seq_len <= max_position_embeddings, this
223
+ factor has no effect. If seq_len > max_position_embeddings, this factor effectively stretches the
224
+ context window using an exponent derived from `dim`.
225
+
226
+ Additionally, this function will make use of the following properties if they are found in the config:
227
+
228
+ * head_dim (`int`, *optional*): The size of the key-value heads in the model. If None, this value will be
229
+ derived as hidden_size // num_attention_heads.
230
+ * partial_rotary_factor (`float`, *optional*): If less than 1.0, inverse frequencies will be returned for
231
+ the first fraction of the head_dim. Defaults to 1.0.
232
+ device (`torch.device`):
233
+ The device to use for initialization of the inverse frequencies.
234
+ seq_len (`int`, *optional*):
235
+ The current sequence length, used to update the dynamic RoPE at inference time. If `None` or shorter than
236
+ max_position_embeddings, this value will be overridden by max_position_embeddings.
237
+
238
+ Returns:
239
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
240
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
241
+ """
242
+ # TODO (joao): use the new `original_max_position_embeddings` from rope_scaling
243
+ base = config.rope_theta
244
+ partial_rotary_factor = getattr(config, "partial_rotary_factor", 1.0)
245
+ head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
246
+ dim = int(head_dim * partial_rotary_factor)
247
+ max_position_embeddings = config.max_position_embeddings
248
+ factor = config.rope_scaling["factor"]
249
+
250
+ attention_factor = 1.0 # Unused in this type of RoPE
251
+
252
+ # seq_len: default to max_position_embeddings, e.g. at init time
253
+ if seq_len is None:
254
+ seq_len = max_position_embeddings
255
+ elif isinstance(seq_len, torch.Tensor):
256
+ seq_len = torch.maximum(
257
+ seq_len,
258
+ torch.tensor(max_position_embeddings, dtype=seq_len.dtype, device=seq_len.device),
259
+ )
260
+ else:
261
+ seq_len = max(seq_len, max_position_embeddings)
262
+
263
+ # Compute the inverse frequencies
264
+ base = base * ((factor * seq_len / max_position_embeddings) - (factor - 1)) ** (dim / (dim - 2))
265
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim))
266
+
267
+
268
+ # Compute FoPE if specified
269
+ use_fope = (
270
+ config.rope_scaling.get("fope_init_factor", None) is not None \
271
+ or config.rope_scaling.get("fope_sep_heads", None) is not None \
272
+ or config.rope_scaling.get("num_inv_freq", None) is not None
273
+ )
274
+
275
+ if use_fope:
276
+ inv_freq, attention_factor = _compute_fope_parameters(config, device, seq_len, inv_freq, attention_factor)
277
+
278
+ return inv_freq, attention_factor
279
+
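Dynamic NTK keeps the default RoPE formula but rescales the base once the sequence outgrows `max_position_embeddings`, so long contexts get longer wavelengths instead of out-of-range rotations. A toy illustration of the base rescaling (all values are invented):

```python
# How dynamic NTK grows the RoPE base with sequence length; all values are invented.
base, dim = 10000.0, 128
max_position_embeddings, factor = 4096, 2.0

for seq_len in (4096, 8192, 16384):
    scaled = base * ((factor * seq_len / max_position_embeddings) - (factor - 1)) ** (dim / (dim - 2))
    print(seq_len, round(scaled))
# 4096 leaves the base at 10000; 8192 and 16384 produce progressively larger bases (slower rotation)
```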
280
+
281
+ def _compute_yarn_parameters(
282
+ config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None
283
+ ) -> tuple["torch.Tensor", float]:
284
+ """
285
+ Computes the inverse frequencies with NTK scaling. Please refer to the
286
+ [original paper](https://huggingface.co/papers/2309.00071)
287
+
288
+ Args:
289
+ config ([`~transformers.PretrainedConfig`]):
290
+ The model configuration. This function assumes that the config will provide at least the following
291
+ properties:
292
+
293
+ * rope_theta (`float`): The base wavelength from which the inverse frequencies will be derived.
294
+ * hidden_size (`int`): The numerator when deriving a head_dim, if not provided directly.
295
+ * num_attention_heads (`int`): The denominator when deriving a head_dim, if not provided directly.
296
+ * max_position_embeddings (`int`): The maximum length of the positional embeddings.
297
+ * rope_scaling (`dict[str, float | int]`): The standard RoPE scaling parameters, from which the following
298
+ keys will be accessed:
299
+ * `attention_factor` (`float`, *optional*): The scaling factor to be applied to the computed cos/sin.
300
+ If None, the value is inferred from `factor`, `mscale`, and `mscale_all_dim` as available.
301
+ * `beta_fast` (`float`, *optional*, defaults to 32): Parameter to set the boundary for extrapolation
302
+ (only) in the linear ramp function.
303
+ * `beta_slow` (`float`, *optional*, defaults to 1): Parameter to set the boundary for interpolation
304
+ (only) in the linear ramp function.
305
+ * `factor` (`float`, *optional*): The scaling factor applied when interpolating the position IDs to
306
+ extend the possible context length. Additionally, if `attention_factor` is None, the log of this
307
+ value is used to compute a value for `attention_factor`, possibly in conjunction with `mscale` and
308
+ `mscale_all_dim`, if provided.
309
+ * `mscale` (`float`, *optional*): If `attention_factor` is None and both `mscale` and
310
+ `mscale_all_dim` are provided, `mscale` acts as a scalar augmenting `log(factor)` when computing the
311
+ numerator for the inferred value of `attention_factor`. If not provided, `attention_factor` will be
312
+ calculated based on `factor` only.
313
+ * `mscale_all_dim` (`float`, *optional*): If `attention_factor` is None and both `mscale` and
314
+ `mscale_all_dim` are provided, `mscale_all_dim` acts as a scalar augmenting `log(factor)` when computing
315
+ the denominator for the inferred value of `attention_factor`. If not provided, `attention_factor`
316
+ will be calculated based on `factor` only.
317
+ * `original_max_position_embeddings` (`int`, *optional*): The original max position embeddings used
318
+ during pretraining. If not provided, the function falls back to `max_position_embeddings`.
319
+ * `truncate` (`bool`, *optional*): Whether to truncate the correction range.
320
+
321
+ Additionally, this function will make use of the following properties if they are found in the config:
322
+
323
+ * head_dim (`int`, *optional*): The size of the key-value heads in the model. If None, this value will be
324
+ derived as hidden_size // num_attention_heads.
325
+ * partial_rotary_factor (`float`, *optional*, defaults to 1.0): If less than 1.0, inverse frequencies
326
+ will be returned for the first fraction of the head_dim.
327
+ device (`torch.device`):
328
+ The device to use for initialization of the inverse frequencies.
329
+ seq_len (`int`, *optional*):
330
+ The current sequence length. Unused for this type of RoPE.
331
+
332
+ Returns:
333
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
334
+ post-processing scaling factor applied to the computed cos/sin.
335
+ """
336
+
337
+ base = config.rope_theta
338
+ partial_rotary_factor = getattr(config, "partial_rotary_factor", 1.0)
339
+ head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
340
+ dim = int(head_dim * partial_rotary_factor)
341
+ factor = config.rope_scaling["factor"]
342
+ attention_factor = config.rope_scaling.get("attention_factor")
343
+ mscale = config.rope_scaling.get("mscale")
344
+ mscale_all_dim = config.rope_scaling.get("mscale_all_dim")
345
+ original_max_position_embeddings = (
346
+ config.rope_scaling.get("original_max_position_embeddings") or config.max_position_embeddings
347
+ )
348
+
349
+ def get_mscale(scale, mscale=1):
350
+ if scale <= 1:
351
+ return 1.0
352
+ return 0.1 * mscale * math.log(scale) + 1.0
353
+
354
+ # Sets the attention factor as suggested in the paper
355
+ if attention_factor is None:
356
+ if mscale and mscale_all_dim:
357
+ attention_factor = float(get_mscale(factor, mscale) / get_mscale(factor, mscale_all_dim))
358
+ else:
359
+ attention_factor = get_mscale(factor)
360
+
361
+ # Optional config options
362
+ # beta_fast/beta_slow: as suggested in the paper, default to 32 and 1 respectively
363
+ beta_fast = config.rope_scaling.get("beta_fast") or 32
364
+ beta_slow = config.rope_scaling.get("beta_slow") or 1
365
+
366
+ # Compute the inverse frequencies
367
+ def find_correction_dim(num_rotations, dim, base, max_position_embeddings):
368
+ """Inverse dimension formula to find the dimension based on the number of rotations"""
369
+ return (dim * math.log(max_position_embeddings / (num_rotations * 2 * math.pi))) / (2 * math.log(base))
370
+
371
+ def find_correction_range(low_rot, high_rot, dim, base, max_position_embeddings, truncate):
372
+ """Find dimension range bounds based on rotations"""
373
+ low = find_correction_dim(low_rot, dim, base, max_position_embeddings)
374
+ high = find_correction_dim(high_rot, dim, base, max_position_embeddings)
375
+ if truncate:
376
+ low = math.floor(low)
377
+ high = math.ceil(high)
378
+ return max(low, 0), min(high, dim - 1)
379
+
380
+ def linear_ramp_factor(min, max, dim):
381
+ if min == max:
382
+ max += 0.001 # Prevent singularity
383
+
384
+ linear_func = (torch.arange(dim, dtype=torch.float32) - min) / (max - min)
385
+ ramp_func = torch.clamp(linear_func, 0, 1)
386
+ return ramp_func
387
+
388
+ # Note on variable naming: "interpolation" comes from the original technique, where we interpolate the position IDs
389
+ # to expand the possible context length. In other words, interpolation = apply scaling factor.
390
+ pos_freqs = base ** (torch.arange(0, dim, 2).to(device=device, dtype=torch.float) / dim)
391
+ inv_freq_extrapolation = 1.0 / pos_freqs
392
+ inv_freq_interpolation = 1.0 / (factor * pos_freqs)
393
+
394
+ truncate = config.rope_scaling.get("truncate", True)
395
+ low, high = find_correction_range(beta_fast, beta_slow, dim, base, original_max_position_embeddings, truncate)
396
+
397
+ # Get n-dimensional rotational scaling corrected for extrapolation
398
+ inv_freq_extrapolation_factor = 1 - linear_ramp_factor(low, high, dim // 2).to(device=device, dtype=torch.float)
399
+ inv_freq = (
400
+ inv_freq_interpolation * (1 - inv_freq_extrapolation_factor)
401
+ + inv_freq_extrapolation * inv_freq_extrapolation_factor
402
+ )
403
+
404
+ # Compute FoPE if specified
405
+ use_fope = (
406
+ config.rope_scaling.get("fope_init_factor", None) is not None \
407
+ or config.rope_scaling.get("fope_sep_heads", None) is not None \
408
+ or config.rope_scaling.get("num_inv_freq", None) is not None
409
+ )
410
+
411
+ if use_fope:
412
+ inv_freq, attention_factor = _compute_fope_parameters(config, device, seq_len, inv_freq, attention_factor)
413
+
414
+ return inv_freq, attention_factor
415
+
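YaRN blends two sets of frequencies: interpolated (divided by `factor`) for low-frequency dimensions and untouched (extrapolated) for high-frequency ones, with a linear ramp in between. A sketch of the ramp with invented correction bounds:

```python
# The YaRN blending ramp with invented correction bounds.
import torch

def linear_ramp(min_, max_, dim):
    return torch.clamp((torch.arange(dim, dtype=torch.float32) - min_) / (max_ - min_), 0, 1)

low, high, half_dim = 4, 12, 16
extrapolation_factor = 1 - linear_ramp(low, high, half_dim)
print(extrapolation_factor)
# 1.0 for the first (high-frequency) dims -> kept unscaled; 0.0 for the last (low-frequency) dims -> divided by factor
```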
416
+
417
+ def _compute_longrope_parameters(
418
+ config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None
419
+ ) -> tuple["torch.Tensor", float]:
420
+ """
421
+ Computes the inverse frequencies with LongRoPE scaling. Please refer to the
422
+ [original implementation](https://github.com/microsoft/LongRoPE)
423
+
424
+ Args:
425
+ config ([`~transformers.PretrainedConfig`]):
426
+ The model configuration. This function assumes that the config will provide at least the following
427
+ properties:
428
+
429
+ * rope_theta (`float`): The base wavelength from which the inverse frequencies will be derived.
430
+ * hidden_size (`int`): The numerator when deriving a head_dim, if not provided directly.
431
+ * num_attention_heads (`int`): The denominator when deriving a head_dim, if not provided directly.
432
+ * max_position_embeddings (`int`): The maximum length of the positional embeddings.
433
+ * original_max_position_embeddings (`int`, *optional*): The original max position embeddings used during
434
+ pretraining. If not provided, defaults to `max_position_embeddings`.
435
+ * rope_scaling (`dict[str, float]`): The standard RoPE scaling parameters, from which the following keys
436
+ will be accessed:
437
+ * `attention_factor` (`float`, *optional*): The scaling factor to be applied on the attention
438
+ computation. If unspecified, it defaults to the value recommended by the implementation, inferred from
439
+ the value of `factor`.
440
+ * `factor` (`float`, *optional*): The scaling factor to apply to the RoPE embeddings. If both
441
+ `max_position_embeddings` and `original_max_position_embeddings` are provided, this value will be
442
+ overridden as the ratio between those values.
443
+ * `long_factor` (`float`, *optional*): The scale factor applied when computing the inverse
444
+ frequencies if `seq_len` is provided and greater than `original_max_position_embeddings`.
445
+ * `short_factor` (`float`, *optional*): The scale factor applied when computing the inverse
446
+ frequencies if `seq_len` is None or less-than-or-equal-to `original_max_position_embeddings`.
447
+
448
+ Additionally, this function will make use of the following properties if they are found in the config:
449
+
450
+ * head_dim (`int`, *optional*): The size of the key-value heads in the model. If None, this value will be
451
+ derived as hidden_size // num_attention_heads.
452
+ * partial_rotary_factor (`float`, *optional*, defaults to 1.0): If less than 1.0, inverse frequencies
453
+ will be returned for the first fraction of the head_dim.
454
+ device (`torch.device`):
455
+ The device to use for initialization of the inverse frequencies.
456
+ seq_len (`int`, *optional*):
457
+ The current sequence length.
458
+
459
+ Returns:
460
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
461
+ post-processing scaling factor applied to the computed cos/sin.
462
+ """
463
+ # TODO (joao): use the new `original_max_position_embeddings` from rope_scaling
464
+ base = config.rope_theta
465
+ partial_rotary_factor = getattr(config, "partial_rotary_factor", 1.0)
466
+ head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
467
+ dim = int(head_dim * partial_rotary_factor)
468
+ long_factor = config.rope_scaling["long_factor"]
469
+ short_factor = config.rope_scaling["short_factor"]
470
+ factor = config.rope_scaling.get("factor")
471
+ attention_factor = config.rope_scaling.get("attention_factor")
472
+
473
+ # NOTE: Phi3 (and potentially other models) modify `max_position_embeddings` and have a
474
+ # `original_max_position_embeddings` field containing the pretrained value. They use the ratio between these two
475
+ # values to compute the default attention scaling factor, instead of using `factor`.
476
+ if original_max_position_embeddings := getattr(config, "original_max_position_embeddings", None):
477
+ factor = config.max_position_embeddings / original_max_position_embeddings
478
+ else:
479
+ original_max_position_embeddings = config.max_position_embeddings
480
+
481
+ # Sets the attention factor as suggested in the paper
482
+ if attention_factor is None:
483
+ if factor <= 1.0:
484
+ attention_factor = 1.0
485
+ else:
486
+ attention_factor = math.sqrt(1 + math.log(factor) / math.log(original_max_position_embeddings))
487
+
488
+ # Compute the inverse frequencies -- scaled based on the target sequence length
489
+ if seq_len and seq_len > original_max_position_embeddings:
490
+ ext_factors = torch.tensor(long_factor, dtype=torch.float32, device=device)
491
+ else:
492
+ ext_factors = torch.tensor(short_factor, dtype=torch.float32, device=device)
493
+ inv_freq_shape = torch.arange(0, dim, 2, dtype=torch.int64, device=device).float() / dim
494
+ inv_freq = 1.0 / (ext_factors * base**inv_freq_shape)
495
+
496
+ # Compute FoPE if specified
497
+ use_fope = (
498
+ config.rope_scaling.get("fope_init_factor", None) is not None \
499
+ or config.rope_scaling.get("fope_sep_heads", None) is not None \
500
+ or config.rope_scaling.get("num_inv_freq", None) is not None
501
+ )
502
+
503
+ if use_fope:
504
+ inv_freq, attention_factor = _compute_fope_parameters(config, device, seq_len, inv_freq, attention_factor)
505
+
506
+ return inv_freq, attention_factor
507
+
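LongRoPE stores two per-dimension rescale vectors and switches between them based on whether the current sequence exceeds the original pretraining length. A toy sketch (factors and lengths are invented):

```python
# LongRoPE-style selection between short_factor and long_factor; every value here is invented.
import torch

base, dim = 10000.0, 8
short_factor = [1.0, 1.0, 1.0, 1.0]
long_factor = [1.0, 2.0, 4.0, 8.0]
original_max_position_embeddings = 4096

def inv_freq_for(seq_len):
    ext = torch.tensor(long_factor if seq_len > original_max_position_embeddings else short_factor)
    exponents = torch.arange(0, dim, 2, dtype=torch.float) / dim
    return 1.0 / (ext * base**exponents)

print(inv_freq_for(2048))   # short_factor path: plain RoPE frequencies
print(inv_freq_for(32768))  # long_factor path: the later dimensions are slowed down further
```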
508
+
509
+ def _compute_llama3_parameters(
510
+ config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None
511
+ ) -> tuple["torch.Tensor", float]:
512
+ """
513
+ Computes the inverse frequencies for llama 3.1.
514
+
515
+ Args:
516
+ config ([`~transformers.PretrainedConfig`]):
517
+ The model configuration. This function assumes that the config will provide at least the following
518
+ properties:
519
+
520
+ * rope_theta (`float`): The base wavelength from which the inverse frequencies will be derived.
521
+ * hidden_size (`int`): The numerator when deriving a head_dim, if not provided directly.
522
+ * num_attention_heads (`int`): The denominator when deriving a head_dim, if not provided directly.
523
+ * rope_scaling (`dict[str, float | int]`): The standard RoPE scaling parameters, from which the following
524
+ keys will be accessed:
525
+ * `factor` (`float`, *optional*): The scaling factor applied 1) to inverse frequencies whose
526
+ wavelength is greater than `low_freq_wavelen`, prior to smoothing, and 2) to all inverse frequencies
527
+ during smoothing.
528
+ * `high_freq_factor` (`float`): The scale factor used to compute `high_freq_wavelen` and
529
+ the value for the denominator of the smoothing factor prior to the `low_freq_factor` shift.
530
+ * `low_freq_factor` (`float`): The scale factor used to compute `low_freq_wavelen` and
531
+ the shift applied to the numerator and denominator of the smoothing factor.
533
+ * `original_max_position_embeddings` (`int`): The original max position embeddings used
534
+ during pretraining. If not provided, the function falls back to `max_position_embeddings`.
535
+
536
+ Additionally, this function will make use of the following properties if they are found in the config:
537
+
538
+ * head_dim (`int`, *optional*): The size of the key-value heads in the model. If None, this value will be
539
+ derived as hidden_size // num_attention_heads.
540
+ * partial_rotary_factor (`float`, *optional*): If less than 1.0, inverse frequencies will be returned for
541
+ the first fraction of the head_dim. Defaults to 1.0.
542
+ device (`torch.device`):
543
+ The device to use for initialization of the inverse frequencies.
544
+ seq_len (`int`, *optional*):
545
+ The current sequence length. Unused for this type of RoPE.
546
+ Returns:
547
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
548
+ post-processing scaling factor applied to the computed cos/sin.
549
+ """
550
+ # Gets the default RoPE parameters
551
+ inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len)
552
+
553
+ factor = config.rope_scaling["factor"] # `8` in the original implementation
554
+ low_freq_factor = config.rope_scaling["low_freq_factor"] # `1` in the original implementation
555
+ high_freq_factor = config.rope_scaling["high_freq_factor"] # `4` in the original implementation
556
+ old_context_len = config.rope_scaling["original_max_position_embeddings"] # `8192` in the original implementation
557
+
558
+ low_freq_wavelen = old_context_len / low_freq_factor
559
+ high_freq_wavelen = old_context_len / high_freq_factor
560
+
561
+ wavelen = 2 * math.pi / inv_freq
562
+ # wavelen < high_freq_wavelen: do nothing
563
+ # wavelen > low_freq_wavelen: divide by factor
564
+ inv_freq_llama = torch.where(wavelen > low_freq_wavelen, inv_freq / factor, inv_freq)
565
+ # otherwise: interpolate between the two, using a smooth factor
566
+ smooth_factor = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)
567
+ smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / factor + smooth_factor * inv_freq_llama
568
+ is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen)
569
+ inv_freq_llama = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama)
570
+
571
+ # Compute FoPE if specified
572
+ use_fope = (
573
+ config.rope_scaling.get("fope_init_factor", None) is not None \
574
+ or config.rope_scaling.get("fope_sep_heads", None) is not None \
575
+ or config.rope_scaling.get("num_inv_freq", None) is not None
576
+ )
577
+
578
+ if use_fope:
579
+ inv_freq_llama, attention_factor = _compute_fope_parameters(config, device, seq_len, inv_freq_llama, attention_factor)
580
+
581
+ return inv_freq_llama, attention_factor
582
+
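The Llama 3.1 rule is piecewise over wavelength: short-wavelength (high-frequency) dimensions are left alone, long-wavelength dimensions are divided by `factor`, and the band in between is smoothly blended. A toy classification of three dimensions (the frequencies are invented):

```python
# Classifying example frequencies into the three Llama-3 bands; numbers are invented.
import math
import torch

factor, low_freq_factor, high_freq_factor, old_context_len = 8.0, 1.0, 4.0, 8192
inv_freq = torch.tensor([1.0, 1.5e-3, 1e-4])       # a high-, medium- and low-frequency dimension
wavelen = 2 * math.pi / inv_freq

low_freq_wavelen = old_context_len / low_freq_factor    # 8192.0
high_freq_wavelen = old_context_len / high_freq_factor  # 2048.0

print(wavelen < high_freq_wavelen)  # tensor([ True, False, False]) -> first dim kept as-is
print(wavelen > low_freq_wavelen)   # tensor([False, False,  True]) -> last dim divided by factor
# The middle dimension falls between the bounds and receives the smoothed interpolation.
```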
583
+ def _compute_fope_parameters(
584
+ config: PretrainedConfig, device: "torch.device", seq_len: Optional[int] = None, inv_freq: Optional["torch.Tensor"] = None, attention_factor: Optional[float] = None
585
+ ) -> tuple["torch.Tensor", float]:
586
+
587
+ # inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len)
588
+
589
+ assert (inv_freq[:-1] > inv_freq[1:]).all(), "Expected inv_freq to be in decreasing order"
590
+
591
+ inv_freq_idx_selected = torch.ones_like(inv_freq, dtype=torch.bool)
592
+ if config.rope_scaling.get("num_inv_freq", None) is not None:
593
+ num_inv_freq = config.rope_scaling["num_inv_freq"]
594
+ inv_freq_idx_selected[num_inv_freq:] = False
595
+ else:
596
+ inv_freq_idx_selected = inv_freq > (2.0 * torch.pi / config.max_position_embeddings)
597
+ num_inv_freq = inv_freq_idx_selected.sum().item()
598
+ inv_freq = inv_freq[inv_freq_idx_selected]
599
+
600
+ return inv_freq, attention_factor
601
+
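The selection above keeps only the frequency components that complete at least one full rotation inside the context window (`inv_freq > 2*pi / max_position_embeddings`), unless an explicit `num_inv_freq` cut-off is given. A small illustration of the threshold with invented values:

```python
# Illustration of the FoPE frequency cut-off; max_position_embeddings and inv_freq are invented.
import torch

max_position_embeddings = 1000
inv_freq = torch.tensor([1.0, 0.1, 0.01, 0.001])

selected = inv_freq > (2.0 * torch.pi / max_position_embeddings)  # threshold ~0.00628
print(inv_freq[selected])  # keeps 1.0, 0.1 and 0.01; drops the component slower than one period per window
```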
602
+ # This maps the "rope_type" string field in rope config to the corresponding function to compute the RoPE parameters
603
+ # from the model config. You can append new {'rope_type': callable} pairs to this dictionary to enable custom RoPE
604
+ # parameterizations, as long as the callable has the same signature.
605
+ ROPE_INIT_FUNCTIONS = {
606
+ "default": _compute_default_rope_parameters,
607
+ "linear": _compute_linear_scaling_rope_parameters,
608
+ "dynamic": _compute_dynamic_ntk_parameters,
609
+ "yarn": _compute_yarn_parameters,
610
+ "longrope": _compute_longrope_parameters,
611
+ "llama3": _compute_llama3_parameters,
612
+ }
613
+
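A rotary-embedding module typically dispatches through this table using the configured rope type. A minimal, self-contained usage sketch with toy config values (the field values are invented, not this model's):

```python
# Minimal dispatch sketch through ROPE_INIT_FUNCTIONS; config values are toy examples.
from transformers import PretrainedConfig

config = PretrainedConfig(
    rope_theta=10000.0,
    hidden_size=256,
    num_attention_heads=4,
    max_position_embeddings=2048,
    rope_scaling={"rope_type": "linear", "factor": 2.0},
)
rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type", "default"))
inv_freq, attention_scaling = ROPE_INIT_FUNCTIONS[rope_type](config, device=None)
print(inv_freq.shape, attention_scaling)  # torch.Size([32]) 1.0
```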
614
+ def _check_received_keys(
615
+ rope_type: str,
616
+ received_keys: set,
617
+ required_keys: set,
618
+ optional_keys: Optional[set] = None,
619
+ ignore_keys: Optional[set] = None,
620
+ ):
621
+ """Compare the received keys in `config.rope_scaling` against the expected and optional keys"""
622
+ # BC: "rope_type" was originally "type" -- let's check for "rope_type" when "type" is present
623
+ if "type" in received_keys:
624
+ received_keys -= {"type"}
625
+ required_keys.add("rope_type")
626
+
627
+ # Some models need to store model-specific keys, and we don't want to throw warning at them
628
+ if ignore_keys is not None:
629
+ received_keys -= ignore_keys
630
+
631
+ missing_keys = required_keys - received_keys
632
+ if missing_keys:
633
+ raise KeyError(f"Missing required keys in `rope_scaling` for 'rope_type'='{rope_type}': {missing_keys}")
634
+
635
+ if optional_keys is not None:
636
+ unused_keys = received_keys - required_keys - optional_keys
637
+ else:
638
+ unused_keys = received_keys - required_keys
639
+ if unused_keys:
640
+ logger.warning(f"Unrecognized keys in `rope_scaling` for 'rope_type'='{rope_type}': {unused_keys}")
641
+
642
+
643
+ def _validate_default_rope_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None):
644
+ rope_scaling = config.rope_scaling
645
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
646
+ required_keys = {"rope_type"}
647
+ received_keys = set(rope_scaling.keys())
648
+ _check_received_keys(rope_type, received_keys, required_keys, ignore_keys=ignore_keys)
649
+
650
+
651
+ def _validate_linear_scaling_rope_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None):
652
+ rope_scaling = config.rope_scaling
653
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
654
+ required_keys = {"rope_type", "factor"}
655
+ received_keys = set(rope_scaling.keys())
656
+ _check_received_keys(rope_type, received_keys, required_keys, ignore_keys=ignore_keys)
657
+
658
+ factor = rope_scaling["factor"]
659
+ if factor is None or not isinstance(factor, float) or factor < 1.0:
660
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
661
+
662
+
663
+ def _validate_dynamic_scaling_rope_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None):
664
+ rope_scaling = config.rope_scaling
665
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
666
+ required_keys = {"rope_type", "factor"}
667
+ # TODO (joao): update logic for the inclusion of `original_max_position_embeddings`
668
+ optional_keys = {"original_max_position_embeddings"}
669
+ received_keys = set(rope_scaling.keys())
670
+ _check_received_keys(rope_type, received_keys, required_keys, optional_keys, ignore_keys=ignore_keys)
671
+
672
+ factor = rope_scaling["factor"]
673
+ if factor is None or not isinstance(factor, float) or factor < 1.0:
674
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
675
+
676
+
677
+ def _validate_yarn_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None):
678
+ rope_scaling = config.rope_scaling
679
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
680
+ required_keys = {"rope_type", "factor"}
681
+ optional_keys = {
682
+ "attention_factor",
683
+ "beta_fast",
684
+ "beta_slow",
685
+ "original_max_position_embeddings",
686
+ "mscale",
687
+ "mscale_all_dim",
688
+ "truncate",
689
+ }
690
+ received_keys = set(rope_scaling.keys())
691
+ _check_received_keys(rope_type, received_keys, required_keys, optional_keys, ignore_keys=ignore_keys)
692
+
693
+ factor = rope_scaling["factor"]
694
+ if factor is None or not isinstance(factor, float) or factor < 1.0:
695
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
696
+
697
+ attention_factor = rope_scaling.get("attention_factor")
698
+ if attention_factor is not None and (not isinstance(attention_factor, float) or attention_factor < 0):
699
+ logger.warning(
700
+ f"`rope_scaling`'s attention_factor field must be a float greater than 0, got {attention_factor}"
701
+ )
702
+ beta_fast = rope_scaling.get("beta_fast")
703
+ if beta_fast is not None and not isinstance(beta_fast, float):
704
+ logger.warning(f"`rope_scaling`'s beta_fast field must be a float, got {beta_fast}")
705
+ beta_slow = rope_scaling.get("beta_slow")
706
+ if beta_slow is not None and not isinstance(beta_slow, float):
707
+ logger.warning(f"`rope_scaling`'s beta_slow field must be a float, got {beta_slow}")
708
+
709
+ if (beta_fast or 32) < (beta_slow or 1):
710
+ logger.warning(
711
+ f"`rope_scaling`'s beta_fast field must be greater than beta_slow, got beta_fast={beta_fast} "
712
+ f"(defaults to 32 if None) and beta_slow={beta_slow} (defaults to 1 if None)"
713
+ )
714
+
715
+ # Models should set `config.rope_scaling["original_max_position_embeddings"]` to their original (pre-yarn) context
716
+ # length, with `config.max_position_embeddings` corresponding to their post-yarn context length.
717
+ # However, for BC purposes, we allow the former to be unset.
718
+ original_max_position_embeddings = config.rope_scaling.get("original_max_position_embeddings")
719
+ if original_max_position_embeddings is not None:
720
+ # Double-check: `factor` should be the ratio between the pre-yarn and post-yarn context lengths.
721
+ implicit_factor = config.max_position_embeddings / original_max_position_embeddings
722
+ if implicit_factor != factor:
723
+ logger.warning_once(
724
+ f"The explicitly set RoPE scaling factor (config.rope_scaling['factor'] = {factor}) does not match "
725
+ "the ratio implicitly set by other parameters (implicit factor = "
726
+ "post-yarn context length / pre-yarn context length = "
727
+ "config.max_position_embeddings / config.rope_scaling['original_max_position_embeddings'] = "
728
+ f"{implicit_factor}). Using the explicit factor ({factor}) in YaRN. This may cause unexpected "
729
+ "behaviour in model usage, please correct the 'max_position_embeddings' fields in the model config."
730
+ )
731
+ # No `config.rope_scaling["original_max_position_embeddings"]`. Is `config.max_position_embeddings` the
732
+ # pre-yarn or the post-yarn context length?
733
+ # BC: we assume it is the pre-yarn context length.
734
+ else:
735
+ logger.warning_once(
736
+ "config.rope_scaling['original_max_position_embeddings'], the pre-yarn context length, is unset. We will "
737
+ "**assume** config.max_position_embeddings holds the pre-yarn context length. Some use cases may expect "
738
+ "config.max_position_embeddings to hold the post-yarn context length (pre-yarn context length * "
739
+ "factor) -- we recommend updating both fields for optimal downstream model usage."
740
+ )
741
+
742
+
743
+ def _validate_longrope_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None):
744
+ rope_scaling = config.rope_scaling
745
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
746
+ required_keys = {"rope_type", "short_factor", "long_factor"}
747
+ # TODO (joao): update logic for the inclusion of `original_max_position_embeddings`
748
+ optional_keys = {"attention_factor", "factor", "original_max_position_embeddings"}
749
+ received_keys = set(rope_scaling.keys())
750
+ _check_received_keys(rope_type, received_keys, required_keys, optional_keys, ignore_keys=ignore_keys)
751
+
752
+ partial_rotary_factor = getattr(config, "partial_rotary_factor", 1.0)
753
+ head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
754
+ dim = int(head_dim * partial_rotary_factor)
755
+
756
+ short_factor = rope_scaling.get("short_factor")
757
+ if not (isinstance(short_factor, list) and all(isinstance(x, (int, float)) for x in short_factor)):
758
+ logger.warning(f"`rope_scaling`'s short_factor field must be a list of numbers, got {short_factor}")
759
+ if len(short_factor) != dim // 2:
760
+ logger.warning(f"`rope_scaling`'s short_factor field must have length {dim // 2}, got {len(short_factor)}")
761
+
762
+ long_factor = rope_scaling.get("long_factor")
763
+ if not (isinstance(long_factor, list) and all(isinstance(x, (int, float)) for x in long_factor)):
764
+ logger.warning(f"`rope_scaling`'s long_factor field must be a list of numbers, got {long_factor}")
765
+ if len(long_factor) != dim // 2:
766
+ logger.warning(f"`rope_scaling`'s long_factor field must have length {dim // 2}, got {len(long_factor)}")
767
+
768
+ # Handle Phi3 divergence: prefer the use of `attention_factor` and/or `factor` over
769
+ # `original_max_position_embeddings` to compute internal variables. The latter lives outside `rope_scaling` and is
770
+ # unique to longrope (= undesirable)
771
+ if hasattr(config, "original_max_position_embeddings"):
772
+ logger.warning_once(
773
+ "This model has set a `original_max_position_embeddings` field, to be used together with "
774
+ "`max_position_embeddings` to determine a scaling factor. Please set the `factor` field of `rope_scaling`"
775
+ "with this ratio instead -- we recommend the use of this field over `original_max_position_embeddings`, "
776
+ "as it is compatible with most model architectures."
777
+ )
778
+ else:
779
+ factor = rope_scaling.get("factor")
780
+ if factor is None:
781
+ logger.warning("Missing required keys in `rope_scaling`: 'factor'")
782
+ elif not isinstance(factor, float) or factor < 1.0:
783
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
784
+
785
+ attention_factor = rope_scaling.get("attention_factor")
786
+ if attention_factor is not None:
787
+ if not isinstance(attention_factor, float) or attention_factor < 0.0:
788
+ logger.warning(
789
+ f"`rope_scaling`'s attention_factor field must be a float greater than 0, got {attention_factor}"
790
+ )
791
+
792
+
793
+ def _validate_llama3_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None):
794
+ rope_scaling = config.rope_scaling
795
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", None)) # BC: "rope_type" was originally "type"
796
+ required_keys = {"rope_type", "factor", "original_max_position_embeddings", "low_freq_factor", "high_freq_factor"}
797
+ received_keys = set(rope_scaling.keys())
798
+ _check_received_keys(rope_type, received_keys, required_keys, ignore_keys=ignore_keys)
799
+
800
+ factor = rope_scaling["factor"]
801
+ if factor is None or not isinstance(factor, float) or factor < 1.0:
802
+ logger.warning(f"`rope_scaling`'s factor field must be a float >= 1, got {factor}")
803
+
804
+ low_freq_factor = rope_scaling["low_freq_factor"]
805
+ high_freq_factor = rope_scaling["high_freq_factor"]
806
+ if low_freq_factor is None or not isinstance(low_freq_factor, float):
807
+ logger.warning(f"`rope_scaling`'s low_freq_factor field must be a float, got {low_freq_factor}")
808
+ if high_freq_factor is None or not isinstance(high_freq_factor, float):
809
+ logger.warning(f"`rope_scaling`'s high_freq_factor field must be a float, got {high_freq_factor}")
810
+ if high_freq_factor <= low_freq_factor:
811
+ logger.warning(
812
+ "`rope_scaling`'s high_freq_factor field must be greater than low_freq_factor, got high_freq_factor="
813
+ f"{high_freq_factor} and low_freq_factor={low_freq_factor}"
814
+ )
815
+
816
+ original_max_position_embeddings = rope_scaling["original_max_position_embeddings"]
817
+ if original_max_position_embeddings is None or not isinstance(original_max_position_embeddings, int):
818
+ logger.warning(
819
+ "`rope_scaling`'s original_max_position_embeddings field must be an integer, got "
820
+ f"{original_max_position_embeddings}"
821
+ )
822
+ if original_max_position_embeddings >= config.max_position_embeddings:
823
+ logger.warning(
824
+ "`rope_scaling`'s original_max_position_embeddings field must be less than max_position_embeddings, got "
825
+ f"{original_max_position_embeddings} and max_position_embeddings={config.max_position_embeddings}"
826
+ )
827
+
828
+
829
+ def _validate_fope_parameters(config: PretrainedConfig, ignore_keys: Optional[set] = None):
830
+ rope_scaling = config.rope_scaling
831
+ required_keys = {"type", "fope_init_factor", "fope_sep_head", "num_inv_freq"}
832
+ received_keys = set(rope_scaling.keys())
833
+ _check_received_keys("fope", received_keys, required_keys, ignore_keys=ignore_keys)
834
+
835
+ fope_init_factor = rope_scaling["fope_init_factor"]
836
+ if fope_init_factor is None or not isinstance(fope_init_factor, float) or fope_init_factor < 0.0:
837
+ logger.warning(f"`rope_scaling`'s fope_init_factor field must be a float >= 0, got {fope_init_factor}")
838
+
839
+ fope_sep_head = rope_scaling["fope_sep_head"]
840
+ if fope_sep_head is None or not isinstance(fope_sep_head, bool):
841
+ logger.warning(f"`rope_scaling`'s fope_sep_head field must be a boolean, got {fope_sep_head}")
842
+
843
+ num_inv_freq = rope_scaling["num_inv_freq"]
844
+ if num_inv_freq is None:
845
+ logger.warning("`rope_scaling`'s num_inv_freq field is None; inverse frequencies greater than 2*pi/max_position_embeddings will be selected automatically")
846
+ elif not isinstance(num_inv_freq, int) or num_inv_freq < 0:
847
+ logger.warning(f"`rope_scaling`'s num_inv_freq field must be a non-negative integer, got {num_inv_freq}")
848
+
849
+
850
+ # Like `ROPE_INIT_FUNCTIONS`, this validation function mapping can be dynamically updated for custom RoPE types.
851
+ ROPE_VALIDATION_FUNCTIONS = {
852
+ "default": _validate_default_rope_parameters,
853
+ "linear": _validate_linear_scaling_rope_parameters,
854
+ "dynamic": _validate_dynamic_scaling_rope_parameters,
855
+ "yarn": _validate_yarn_parameters,
856
+ "longrope": _validate_longrope_parameters,
857
+ "llama3": _validate_llama3_parameters,
858
+ }
859
+
860
+
861
+ def rope_config_validation(config: PretrainedConfig, ignore_keys: Optional[set] = None):
862
+ """
863
+ Validate the RoPE config arguments, given a `PretrainedConfig` object
864
+ """
865
+ rope_scaling = getattr(config, "rope_scaling", None) # not a default parameter in `PretrainedConfig`
866
+ if rope_scaling is None:
867
+ return
868
+
869
+ # BC: "rope_type" was originally "type"
870
+ rope_type = rope_scaling.get("rope_type", rope_scaling.get("type", "default"))
871
+ validation_fn = ROPE_VALIDATION_FUNCTIONS.get(rope_type)
872
+ if validation_fn is not None:
873
+ validation_fn(config, ignore_keys=ignore_keys)
874
+ else:
875
+ logger.warning(
876
+ f"Missing validation function mapping in `ROPE_VALIDATION_FUNCTIONS` for 'rope_type'='{rope_type}'"
877
+ )
878
+
879
+ use_fope = (
880
+ config.rope_scaling.get("fope_init_factor", None) is not None \
881
+ or config.rope_scaling.get("fope_sep_heads", None) is not None \
882
+ or config.rope_scaling.get("num_inv_freq", None) is not None
883
+ )
884
+ if use_fope:
885
+ _validate_fope_parameters(config, ignore_keys=ignore_keys)
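For reference, a minimal validation sketch (assuming `rope_config_validation` is imported from the `modeling_rope_utils.py` module above; the config values are illustrative and not taken from this checkpoint's config.json):

from transformers import PretrainedConfig

cfg = PretrainedConfig(max_position_embeddings=65536)  # post-scaling context length (illustrative)
cfg.rope_scaling = {
    "rope_type": "llama3",
    "factor": 8.0,
    "low_freq_factor": 1.0,
    "high_freq_factor": 4.0,
    "original_max_position_embeddings": 8192,
}
rope_config_validation(cfg)  # dispatches to _validate_llama3_parameters; warns only on malformed values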
panda.jpg ADDED
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "size": {
3
+ "longest_edge": 16777216,
4
+ "shortest_edge": 65536
5
+ },
6
+ "patch_size": 16,
7
+ "temporal_patch_size": 2,
8
+ "merge_size": 2,
9
+ "image_mean": [
10
+ 0.5,
11
+ 0.5,
12
+ 0.5
13
+ ],
14
+ "image_std": [
15
+ 0.5,
16
+ 0.5,
17
+ 0.5
18
+ ],
19
+ "image_processor_type": "Qwen2VLImageProcessorFast",
20
+ "auto_map": {
21
+ "AutoProcessor": "processing_interns1_pro.InternS1ProProcessor"
22
+ }
23
+ }
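Since `auto_map` routes `AutoProcessor` to `processing_interns1_pro.InternS1ProProcessor`, the image/video processors and tokenizer can be loaded together with remote code enabled. A minimal sketch, with a hypothetical local path standing in for the actual checkpoint location:

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("/path/to/this/checkpoint", trust_remote_code=True)  # hypothetical path
print(type(processor).__name__)  # expected to resolve to InternS1ProProcessor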
processing_interns1_pro.py ADDED
@@ -0,0 +1,311 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/qwen3_vl/modular_qwen3_vl.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_qwen3_vl.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+ from typing import Union
22
+
23
+ import numpy as np
24
+
25
+ from transformers.feature_extraction_utils import BatchFeature
26
+ from transformers.image_utils import ImageInput
27
+ from transformers.processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
28
+ from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
29
+ from transformers.utils import logging
30
+ from transformers.video_utils import VideoInput
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+
36
+ class InternS1ProProcessorKwargs(ProcessingKwargs, total=False):
37
+ _defaults = {
38
+ "text_kwargs": {
39
+ "padding": False,
40
+ "return_token_type_ids": False,
41
+ "return_mm_token_type_ids": False,
42
+ },
43
+ "videos_kwargs": {"return_metadata": True},
44
+ }
45
+
46
+
47
+ class InternS1ProProcessor(ProcessorMixin):
48
+ r"""
49
+ Constructs a Qwen3VL processor which wraps a Qwen3VL image processor and a Qwen2 tokenizer into a single processor.
50
+ [`Qwen3VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
51
+ [`~Qwen3VLProcessor.__call__`] and [`~Qwen3VLProcessor.decode`] for more information.
52
+ Args:
53
+ image_processor ([`Qwen2VLImageProcessor`], *optional*):
54
+ The image processor is a required input.
55
+ tokenizer ([`Qwen2TokenizerFast`], *optional*):
56
+ The tokenizer is a required input.
57
+ video_processor ([`Qwen3VLVideoProcessor`], *optional*):
58
+ The video processor is a required input.
59
+ chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
60
+ in a chat into a tokenizable string.
61
+ """
62
+
63
+ attributes = ["image_processor", "tokenizer", "video_processor"]
64
+ image_processor_class = "AutoImageProcessor"
65
+ video_processor_class = "AutoVideoProcessor"
66
+ tokenizer_class = "AutoTokenizer"
67
+
68
+ def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
69
+ self.image_token = "<|image_pad|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
70
+ self.video_token = "<|video_pad|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
71
+ self.image_token_id = (
72
+ tokenizer.image_token_id
73
+ if getattr(tokenizer, "image_token_id", None)
74
+ else tokenizer.convert_tokens_to_ids(self.image_token)
75
+ )
76
+ self.video_token_id = (
77
+ tokenizer.video_token_id
78
+ if getattr(tokenizer, "video_token_id", None)
79
+ else tokenizer.convert_tokens_to_ids(self.video_token)
80
+ )
81
+ super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
82
+ self.vision_start_token = (
83
+ "<|vision_start|>" if not hasattr(tokenizer, "vision_start_token") else tokenizer.vision_start_token
84
+ )
85
+ self.vision_end_token = (
86
+ "<|vision_end|>" if not hasattr(tokenizer, "vision_end_token") else tokenizer.vision_end_token
87
+ )
88
+ self.vision_start_token_id = (
89
+ tokenizer.vision_start_token_id
90
+ if getattr(tokenizer, "vision_start_token_id", None)
91
+ else tokenizer.convert_tokens_to_ids(self.vision_start_token)
92
+ )
93
+ self.vision_end_token_id = (
94
+ tokenizer.vision_end_token_id
95
+ if getattr(tokenizer, "vision_end_token_id", None)
96
+ else tokenizer.convert_tokens_to_ids(self.vision_end_token)
97
+ )
98
+
99
+ def __call__(
100
+ self,
101
+ images: ImageInput = None,
102
+ text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
103
+ videos: VideoInput = None,
104
+ **kwargs: Unpack[InternS1ProProcessorKwargs],
105
+ ) -> BatchFeature:
106
+ """
107
+ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
108
+ and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
109
+ the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwrags` arguments to
110
+ Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
111
+
112
+ Args:
113
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
114
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
115
+ tensor. Both channels-first and channels-last formats are supported.
116
+ text (`str`, `list[str]`, `list[list[str]]`):
117
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
118
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
119
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
120
+ videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
121
+ The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
122
+ tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
123
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
124
+ If set, will return tensors of a particular framework. Acceptable values are:
125
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
126
+ - `'np'`: Return NumPy `np.ndarray` objects.
127
+
128
+ Returns:
129
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
130
+
131
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
132
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
133
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
134
+ `None`).
135
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
136
+ - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
137
+ - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
138
+ - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
139
+ """
140
+ output_kwargs = self._merge_kwargs(
141
+ InternS1ProProcessorKwargs,
142
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
143
+ **kwargs,
144
+ )
145
+ if images is not None:
146
+ image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
147
+ image_grid_thw = image_inputs["image_grid_thw"]
148
+ else:
149
+ image_inputs = {}
150
+ image_grid_thw = None
151
+
152
+ if videos is not None:
153
+ videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
154
+ video_grid_thw = videos_inputs["video_grid_thw"]
155
+ # If user has not requested video metadata, pop it
156
+ if "return_metadata" not in kwargs:
157
+ video_metadata = videos_inputs.pop("video_metadata")
158
+ else:
159
+ video_metadata = videos_inputs["video_metadata"]
160
+ video_grid_thw = videos_inputs["video_grid_thw"]
161
+ else:
162
+ videos_inputs = {}
163
+ video_grid_thw = None
164
+
165
+ if not isinstance(text, list):
166
+ text = [text]
167
+
168
+ text = text.copy() # below lines change text in-place
169
+ if image_grid_thw is not None:
170
+ merge_length = self.image_processor.merge_size**2
171
+ index = 0
172
+ for i in range(len(text)):
173
+ while self.image_token in text[i]:
174
+ num_image_tokens = image_grid_thw[index].prod() // merge_length
175
+ text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
176
+ index += 1
177
+ text[i] = text[i].replace("<|placeholder|>", self.image_token)
178
+
179
+ if video_grid_thw is not None:
180
+ merge_length = self.video_processor.merge_size**2
181
+ index = 0
182
+ for i in range(len(text)):
183
+ while self.video_token in text[i]:
184
+ metadata = video_metadata[index]
185
+ if metadata.fps is None:
186
+ logger.warning_once(
187
+ "Qwen3VL requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
188
+ "Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
189
+ "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
190
+ )
191
+ metadata.fps = 24 if metadata.fps is None else metadata.fps
192
+
193
+ # if timestamps are not provided, calculate them
194
+ curr_timestamp = self._calculate_timestamps(
195
+ metadata.frames_indices,
196
+ metadata.fps,
197
+ self.video_processor.merge_size,
198
+ )
199
+
200
+ video_placeholder = ""
201
+ frame_seqlen = video_grid_thw[index][1:].prod() // merge_length
202
+ for frame_idx in range(video_grid_thw[index][0]):
203
+ curr_time = curr_timestamp[frame_idx]
204
+ video_placeholder += f"<{curr_time:.1f} seconds>"
205
+ video_placeholder += (
206
+ self.vision_start_token + "<|placeholder|>" * frame_seqlen + self.vision_end_token
207
+ )
208
+ if f"{self.vision_start_token}{self.video_token}{self.vision_end_token}" in text[i]:
209
+ text[i] = text[i].replace(
210
+ f"{self.vision_start_token}{self.video_token}{self.vision_end_token}", video_placeholder, 1
211
+ )
212
+ else:
213
+ # vllm may input video token directly
214
+ text[i] = text[i].replace(self.video_token, video_placeholder, 1)
215
+ index += 1
216
+
217
+ text[i] = text[i].replace("<|placeholder|>", self.video_token)
218
+
219
+ return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
220
+ return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
221
+ text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
222
+ self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
223
+
224
+ if return_mm_token_type_ids:
225
+ array_ids = np.array(text_inputs["input_ids"])
226
+ mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
227
+ mm_token_type_ids[array_ids == self.image_token_id] = 1
228
+ text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
229
+
230
+ return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
231
+
232
+ def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
233
+ """
234
+ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
235
+ Args:
236
+ image_sizes (`list[list[int]]`, *optional*):
237
+ The input sizes formatted as (height, width) per each image.
238
+ video_sizes (`list[list[int]]`, *optional*):
239
+ The input sizes formatted as (num_frames, height, width) per each video.
240
+ Returns:
241
+ `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
242
+ input modalities, along with other useful data.
243
+ """
244
+
245
+ vision_data = {}
246
+ if image_sizes is not None:
247
+ images_kwargs = InternS1ProProcessorKwargs._defaults.get("images_kwargs", {})
248
+ images_kwargs.update(kwargs)
249
+ merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
250
+
251
+ num_image_patches = [
252
+ self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
253
+ for image_size in image_sizes
254
+ ]
255
+ num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
256
+ vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
257
+
258
+ if video_sizes is not None:
259
+ videos_kwargs = InternS1ProProcessorKwargs._defaults.get("videos_kwargs", {})
260
+ videos_kwargs.update(kwargs)
261
+ num_video_patches = [
262
+ self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
263
+ for video_size in video_sizes
264
+ ]
265
+ num_video_tokens = [(num_patches // merge_size**2) for num_patches in num_video_patches]
266
+ vision_data["num_video_tokens"] = num_video_tokens
267
+
268
+ return MultiModalData(**vision_data)
269
+
270
+ def post_process_image_text_to_text(
271
+ self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
272
+ ):
273
+ """
274
+ Post-process the output of the model to decode the text.
275
+
276
+ Args:
277
+ generated_outputs (`torch.Tensor` or `np.ndarray`):
278
+ The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
279
+ or `(sequence_length,)`.
280
+ skip_special_tokens (`bool`, *optional*, defaults to `True`):
281
+ Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
282
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
283
+ Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
284
+ **kwargs:
285
+ Additional arguments to be passed to the tokenizer's `batch_decode method`.
286
+
287
+ Returns:
288
+ `list[str]`: The decoded text.
289
+ """
290
+ return self.tokenizer.batch_decode(
291
+ generated_outputs,
292
+ skip_special_tokens=skip_special_tokens,
293
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
294
+ **kwargs,
295
+ )
296
+
297
+ def _calculate_timestamps(self, indices: Union[list[int], np.ndarray], video_fps: float, merge_size: int = 2):
298
+ if not isinstance(indices, list):
299
+ indices = indices.tolist()
300
+ if len(indices) % merge_size != 0:
301
+ indices.extend(indices[-1] for _ in range(merge_size - len(indices) % merge_size))
302
+ timestamps = [idx / video_fps for idx in indices]
303
+ # @JJJYmmm frames are merged by self.merge_size, \
304
+ # so we need to average the timestamps between the first/last frame within the temporal patch
305
+ timestamps = [
306
+ (timestamps[i] + timestamps[i + merge_size - 1]) / 2 for i in range(0, len(timestamps), merge_size)
307
+ ]
308
+ return timestamps
309
+
310
+
311
+ __all__ = ["InternS1ProProcessor"]
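To make the placeholder arithmetic in `__call__` and `_calculate_timestamps` concrete, here is a small worked sketch with illustrative numbers (not taken from a real run):

merge_size = 2
t, h, w = 1, 32, 32                               # patch grid (image_grid_thw) for one image
num_image_tokens = (t * h * w) // merge_size**2   # each <|image_pad|> expands to 256 tokens here
print(num_image_tokens)

frame_indices, fps = [0, 12, 24, 36], 24.0        # sampled frame indices and source fps
timestamps = [i / fps for i in frame_indices]     # [0.0, 0.5, 1.0, 1.5] seconds
# average the first/last timestamp inside each temporal patch of `merge_size` frames
merged = [(timestamps[i] + timestamps[i + merge_size - 1]) / 2 for i in range(0, len(timestamps), merge_size)]
print(merged)                                     # [0.25, 1.25]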
special_tokens_map.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "bos_token": {
18
+ "content": "<|im_start|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "eos_token": {
25
+ "content": "<|im_end|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ },
31
+ "pad_token": {
32
+ "content": "<|endoftext|>",
33
+ "lstrip": false,
34
+ "normalized": false,
35
+ "rstrip": false,
36
+ "single_word": false
37
+ }
38
+ }
test_inference.py ADDED
@@ -0,0 +1,147 @@
1
+ from pathlib import Path
2
+ import torch
3
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoProcessor
4
+
5
+
6
+ model_path = Path(__file__).parent.resolve()
7
+ print(f"Loading model from: {model_path}")
8
+
9
+ # Load the model config
10
+ config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
11
+ print(f"Model config: {config.model_type}")
12
+ print(f"Architecture: {config.architectures}")
13
+
14
+ # Load the model (bfloat16 precision, automatic device mapping)
15
+ print("\nLoading model...")
16
+ model = AutoModelForCausalLM.from_pretrained(
17
+ model_path,
18
+ dtype=torch.bfloat16,
19
+ device_map="auto",
20
+ attn_implementation="flash_attention_2",
21
+ trust_remote_code=True
22
+ )
23
+ print(f"✓ Model loaded successfully!")
24
+ print(f"Model type: {type(model).__name__}")
25
+ print(f"Model device: {model.device}")
26
+
27
+ # Load the processor (tokenizer + image processor)
28
+ print("\nLoading processor...")
29
+ processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
30
+
31
+
32
+ # ============================================================================
33
+ # Test 1: text-only conversation
34
+ # ============================================================================
35
+ print("\n" + "=" * 80)
36
+ print("Test 1: text-only conversation")
37
+ print("=" * 80)
38
+
39
+ text_messages = [
40
+ {
41
+ "role": "user",
42
+ "content": [
43
+ {"type": "text", "text": "Hello, please introduce yourself, including your capabilities and use cases."}
44
+ ]
45
+ }
46
+ ]
47
+
48
+ print("\nPreparing text-only input...")
49
+ text_inputs = processor.apply_chat_template(
50
+ text_messages,
51
+ tokenize=True,
52
+ add_generation_prompt=True,
53
+ return_dict=True,
54
+ return_tensors="pt"
55
+ )
56
+ text_inputs = text_inputs.to(model.device)
57
+
58
+ print(f"Input shape: {text_inputs['input_ids'].shape}")
59
+ print(f"Has pixel values: {'pixel_values' in text_inputs}")
60
+
61
+ print("\nGenerating text-only response...")
62
+ with torch.inference_mode():
63
+ text_generated_ids = model.generate(
64
+ **text_inputs,
65
+ max_new_tokens=256,
66
+ do_sample=False,
67
+ temperature=1.0,
68
+ )
69
+
70
+ text_generated_ids_trimmed = [
71
+ out_ids[len(in_ids):] for in_ids, out_ids in zip(text_inputs.input_ids, text_generated_ids)
72
+ ]
73
+
74
+ text_output = processor.batch_decode(
75
+ text_generated_ids_trimmed,
76
+ skip_special_tokens=True,
77
+ clean_up_tokenization_spaces=False
78
+ )
79
+
80
+ print("\n" + "-" * 80)
81
+ print("Text-only output:")
82
+ print("-" * 80)
83
+ print(text_output[0])
84
+ print("-" * 80)
85
+ print("\n✅ Text-only test finished!")
86
+
87
+ # ============================================================================
88
+ # 测试 2: 图文混合输入
89
+ # ============================================================================
90
+ print("\n" + "=" * 80)
91
+ print("Test 2: mixed image-text input (multimodal)")
92
+ print("=" * 80)
93
+
94
+ # Build the conversation messages (mixed image-text input)
95
+ multimodal_messages = [
96
+ {
97
+ "role": "user",
98
+ "content": [
99
+ {"type": "image", "image": "./panda.jpg"},
100
+ # {"type": "image", "image": "./milk.jpeg"},
101
+ {"type": "text", "text": "Please describe this image"},
102
+ ],
103
+ }
104
+ ]
105
+
106
+ print("\nPreparing mixed image-text input...")
107
+ # Apply the chat template and tokenize
108
+ multimodal_inputs = processor.apply_chat_template(
109
+ multimodal_messages,
110
+ tokenize=True,
111
+ add_generation_prompt=True,
112
+ return_dict=True,
113
+ return_tensors="pt"
114
+ )
115
+ multimodal_inputs = multimodal_inputs.to(model.device)
116
+
117
+ print(f"Input shape: {multimodal_inputs['input_ids'].shape}")
118
+ print(f"Pixel values shape: {multimodal_inputs['pixel_values'].shape if 'pixel_values' in multimodal_inputs else 'N/A'}")
119
+
120
+ # Generate the output
121
+ print("\nGenerating image description...")
122
+ with torch.inference_mode():
123
+ multimodal_generated_ids = model.generate(
124
+ **multimodal_inputs,
125
+ max_new_tokens=512,
126
+ do_sample=False,
127
+ temperature=1.0,
128
+ )
129
+
130
+ # Extract the generated tokens (drop the input part)
131
+ multimodal_generated_ids_trimmed = [
132
+ out_ids[len(in_ids):] for in_ids, out_ids in zip(multimodal_inputs.input_ids, multimodal_generated_ids)
133
+ ]
134
+
135
+ # Decode to text
136
+ multimodal_output = processor.batch_decode(
137
+ multimodal_generated_ids_trimmed,
138
+ skip_special_tokens=True,
139
+ clean_up_tokenization_spaces=False
140
+ )
141
+
142
+ print("\n" + "-" * 80)
143
+ print("Image description output:")
144
+ print("-" * 80)
145
+ print(multimodal_output[0])
146
+ print("-" * 80)
147
+ print("\n✅ Multimodal test finished!")
test_router_logits.py ADDED
@@ -0,0 +1,95 @@
1
+ import torch
2
+ from transformers import AutoProcessor
3
+ from modeling_interns1_pro import InternS1ProForConditionalGeneration
4
+
5
+ # Load the model
6
+ model_path = "." # current directory
7
+ model = InternS1ProForConditionalGeneration.from_pretrained(
8
+ model_path,
9
+ torch_dtype=torch.bfloat16,
10
+ device_map="auto",
11
+ trust_remote_code=True,
12
+ )
13
+
14
+ # Simple text-input test
15
+ input_ids = torch.tensor([[1, 2, 3, 4, 5]]).to(model.device)
16
+ attention_mask = torch.ones_like(input_ids)
17
+
18
+ # Test 1: without requesting router_logits
19
+ print("=" * 50)
20
+ print("Test 1: without requesting output_router_logits")
21
+ seq_len = input_ids.shape[1]
22
+ cache_position = torch.arange(seq_len, device=model.device)
23
+ outputs = model(
24
+ input_ids=input_ids,
25
+ attention_mask=attention_mask,
26
+ cache_position=cache_position,
27
+ )
28
+ print(f"outputs keys: {outputs.keys() if hasattr(outputs, 'keys') else dir(outputs)}")
29
+ print(f"outputs.aux_loss: {outputs.aux_loss}")
30
+
31
+ # Test 2: requesting router_logits
32
+ print("Test 2: requesting output_router_logits=True")
33
+ print("测试 2: 请求 output_router_logits=True")
34
+ outputs = model(
35
+ input_ids=input_ids,
36
+ attention_mask=attention_mask,
37
+ output_router_logits=True,
38
+ cache_position=cache_position,
39
+ )
40
+ print(f"outputs.aux_loss: {outputs.aux_loss}")
41
+
42
+ # Check whether the outputs have a router_logits attribute
43
+ if hasattr(outputs, 'router_logits'):
44
+ router_logits = outputs.router_logits
45
+ print(f"router_logits type: {type(router_logits)}")
46
+ if router_logits is not None:
47
+ if isinstance(router_logits, tuple):
48
+ print(f"router_logits length: {len(router_logits)}")
49
+ for i, rl in enumerate(router_logits):
50
+ if rl is not None:
51
+ print(f" layer {i}: shape={rl.shape}, dtype={rl.dtype}")
52
+ print(f" min={rl.min().item():.4f}, max={rl.max().item():.4f}, mean={rl.mean().item():.4f}")
53
+ else:
54
+ print(f"router_logits shape: {router_logits.shape}")
55
+ else:
56
+ print("router_logits is None")
57
+ else:
58
+ print("outputs has no router_logits attribute")
59
+
60
+ # Test 3: manually inspect the gate of the MoE layers
61
+ print("=" * 50)
62
+ print("Test 3: manually inspecting MoE layers")
63
+ moe_layer_indices = []
64
+ for i, layer in enumerate(model.model.language_model.layers):
65
+ mlp = layer.mlp
66
+ mlp_class_name = mlp.__class__.__name__
67
+ print(f"Layer {i}: mlp type = {mlp_class_name}")
68
+ if "SparseMoe" in mlp_class_name or "Moe" in mlp_class_name:
69
+ moe_layer_indices.append(i)
70
+ if hasattr(mlp, 'gate'):
71
+ print(f" gate: {mlp.gate}")
72
+ print(f" gate type: {type(mlp.gate)}")
73
+
74
+ print(f"\nMoE layers: {moe_layer_indices}")
75
+ print(f"Total MoE layers: {len(moe_layer_indices)}")
76
+
77
+ # Test 4: manually forward one MoE layer and get its router_logits
78
+ print("=" * 50)
79
+ print("Test 4: manually fetching router_logits")
80
+ if moe_layer_indices:
81
+ layer_idx = moe_layer_indices[0]
82
+ moe_block = model.model.language_model.layers[layer_idx].mlp
83
+
84
+ # Create a dummy input
85
+ hidden_size = model.config.text_config.hidden_size
86
+ fake_hidden = torch.randn(1, 5, hidden_size, dtype=torch.bfloat16, device=model.device)
87
+
88
+ # Manually compute router_logits
89
+ # hidden_flat = fake_hidden.reshape(-1, hidden_size)
90
+ # router_logits_manual = moe_block.gate(hidden_flat)
91
+ router_logits_manual, router_scores, router_indices = moe_block.gate(fake_hidden)
92
+ print(f"Manually computed router_logits shape: {router_logits_manual.shape}")
93
+ print(f" Expected (batch*seq_len, num_experts) = (5, {model.config.text_config.num_experts})")
94
+ print(f"router_scores shape: {router_scores.shape}")
95
+ print(f"router_indices shape: {router_indices.shape}")
tokenization_interns1.py ADDED
@@ -0,0 +1,1007 @@
1
+ # coding=utf-8
2
+ # Copyright 2025 The Intern team and Shanghai AI Lab team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for InternS1."""
16
+
17
+ import json
18
+ import os
19
+ import unicodedata
20
+ from abc import ABC, abstractmethod
21
+ from typing import Optional, Union
22
+ from functools import lru_cache
23
+
24
+ import regex as re
25
+ import sentencepiece as spm
26
+
27
+ from transformers.tokenization_utils_base import AddedToken, TextInput
28
+ from transformers.tokenization_utils import PreTrainedTokenizer
29
+ from transformers.utils import logging
30
+ # from transformers.utils.import_utils import requires
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ try:
36
+ from rdkit import Chem, RDLogger
37
+
38
+ RDLogger.DisableLog("rdApp.error")
39
+ RDLogger.DisableLog("rdApp.*")
40
+ RDKIT_AVAILABLE = True
41
+ except ImportError:
42
+ logger.warning_once(
43
+ "If tokenization of SMILES formulas is required, please 'pip install RDKit' for better tokenization quality."
44
+ )
45
+ RDKIT_AVAILABLE = False
46
+
47
+ VOCAB_FILES_NAMES = {
48
+ "vocab_file": "vocab.json",
49
+ "merges_file": "merges.txt",
50
+ "sp_model_SMILES": "tokenizer_SMILES.model",
51
+ "sp_model_PROT": "tokenizer_PROT.model",
52
+ "sp_model_XNA": "tokenizer_XNA.model",
53
+ }
54
+
55
+ PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
56
+
57
+
58
+ class InternS1CheckModuleMixin(ABC):
59
+ """
60
+ Basic auto-detection module.
61
+
62
+ Note that short strings are ignored by this module.
63
+ """
64
+
65
+ def __init__(self, *, min_length: int):
66
+ self.min_length = min_length
67
+ self.REGEX = self._build_regex()
68
+ self.all_auto_detect_token_start = ["<SMILES_AUTO_DETECT>", "<PROT_AUTO_DETECT>", "<XNA_AUTO_DETECT>"]
69
+ self.all_auto_detect_token_end = ["</SMILES_AUTO_DETECT>", "</PROT_AUTO_DETECT>", "</XNA_AUTO_DETECT>"]
70
+ self.auto_detect_token = []
71
+ self.truncation = False
72
+
73
+ @abstractmethod
74
+ def _build_regex(self):
75
+ pass
76
+
77
+ @abstractmethod
78
+ def check_legitimacy(self, candidate: str) -> bool:
79
+ pass
80
+
81
+ def re_split(self, texts: Union[str, list[str]]) -> list[str]:
82
+ if isinstance(texts, str):
83
+ texts = [texts]
84
+
85
+ total_results = []
86
+
87
+ no_split_flag = 0
88
+
89
+ for text in texts:
90
+ if text in self.all_auto_detect_token_start:
91
+ total_results.append(text)
92
+ no_split_flag += 1
93
+ continue
94
+ elif text in self.all_auto_detect_token_end:
95
+ total_results.append(text)
96
+ no_split_flag = max(0, no_split_flag - 1)
97
+ continue
98
+
99
+ if no_split_flag > 0:
100
+ total_results.append(text)
101
+ continue
102
+
103
+ results = []
104
+ current_pos = 0
105
+ for match in self.REGEX.finditer(text):
106
+ candidate = match.group(1)
107
+
108
+ if len(candidate) >= self.min_length:
109
+ match_start, match_end = match.span(1)
110
+
111
+ if not self.check_legitimacy(candidate):
112
+ continue
113
+
114
+ if not self.truncation:
115
+ if match_start > 0 and text[match_start - 1].encode("UTF-8").isalpha():
116
+ continue
117
+ if match_end < len(text) and text[match_end].encode("UTF-8").isalpha():
118
+ continue
119
+
120
+ if match_start > current_pos:
121
+ non_candidate_part = text[current_pos:match_start]
122
+ results.append(non_candidate_part)
123
+ else:
124
+ continue
125
+
126
+ results.extend([self.auto_detect_token[0], candidate, self.auto_detect_token[1]])
127
+ current_pos = match_end
128
+
129
+ if current_pos < len(text):
130
+ remaining_part = text[current_pos:]
131
+ results.append(remaining_part)
132
+
133
+ total_results.extend(results)
134
+
135
+ return total_results
136
+
137
+
138
+ class XnaCheckModule(InternS1CheckModuleMixin):
139
+ """
140
+ XNA sequence auto-detection module.
141
+
142
+ Automatically detects XNA sequence using regex patterns.
143
+ """
144
+ def __init__(self, *, min_length: int = 27):
145
+ super().__init__(min_length=min_length)
146
+ self.auto_detect_token = ["<XNA_AUTO_DETECT>", "</XNA_AUTO_DETECT>"]
147
+ self.truncation = True
148
+
149
+ def _build_regex(self):
150
+ return re.compile(r"([ATCGU]{" + str(self.min_length) + r",})")
151
+
152
+ def check_legitimacy(self, candidate: str):
153
+ return True
154
+
155
+
156
+ class ProtCheckModule(InternS1CheckModuleMixin):
157
+ """
158
+ Protein sequence auto-detection module.
159
+
160
+ Automatically detects protein sequence using regex patterns.
161
+ """
162
+ def __init__(self, *, min_length: int = 27):
163
+ super().__init__(min_length=min_length)
164
+ self.auto_detect_token = ["<PROT_AUTO_DETECT>", "</PROT_AUTO_DETECT>"]
165
+ self.truncation = True
166
+ self._xna_pattern = re.compile(r"^[ATCGU]+$")
167
+
168
+ def _build_regex(self):
169
+ return re.compile(r"([A-Z]{" + str(self.min_length) + r",})")
170
+
171
+ def check_legitimacy(self, candidate: str):
172
+ if self._xna_pattern.match(candidate):
173
+ return False
174
+ return True
175
+
176
+
177
+ # fmt: off
178
+ bonds = ["-", "=", "#", ":", "/", "\\", ".", "$"]
179
+ organic_symbols = ["B", "C", "N", "O", "P", "S", "F", "Cl", "Br", "I"]
180
+ other_allows = bonds + ["[", "]", "(", ")", ";"]
181
+ aromatic_symbols = ["b", "c", "n", "o", "s", "p"]
182
+ elements = [
183
+ "H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne",
184
+ "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar", "K", "Ca",
185
+ "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn",
186
+ "Ga", "Ge", "As", "Se", "Br", "Kr", "Rb", "Sr", "Y", "Zr",
187
+ "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn",
188
+ "Sb", "Te", "I", "Xe", "Cs", "Ba", "La", "Ce", "Pr", "Nd",
189
+ "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm", "Yb",
190
+ "Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg",
191
+ "Tl", "Pb", "Bi", "Po", "At", "Rn", "Fr", "Ra", "Ac", "Th",
192
+ "Pa", "U", "Np", "Pu", "Am", "Cm", "Bk", "Cf", "Es", "Fm",
193
+ "Md", "No", "Lr", "Rf", "Db", "Sg", "Bh", "Hs", "Mt", "Ds",
194
+ "Rg", "Cn", "Nh", "Fl", "Mc", "Lv", "Ts", "Og"
195
+ ]
196
+ # fmt: on
197
+
198
+
199
+ class SmilesCheckModule(InternS1CheckModuleMixin):
200
+ """
201
+ SMILES molecular sequence auto-detection module.
202
+
203
+ Automatically detects and validates SMILES strings in text using regex patterns
204
+ or chemical syntax rules. Uses RDKit for precise validation when available,
205
+ otherwise falls back to rule-based validation.
206
+ """
207
+
208
+ def __init__(self, *, min_length: int = 10):
209
+ super().__init__(min_length=min_length)
210
+ self.auto_detect_token = ["<SMILES_AUTO_DETECT>", "</SMILES_AUTO_DETECT>"]
211
+ self._SQ_BRACKET_BAN_1 = re.compile(r"(?:[A-GI-Z]|[a-z]){3,}")
212
+ self._SQ_BRACKET_BAN_2 = re.compile(r"\d{4,}")
213
+
214
+ def _build_regex(self):
215
+ # fmt: off
216
+ _two_letter_elements = [
217
+ 'Ac', 'Ag', 'Al', 'Am', 'Ar', 'As', 'At', 'Au', 'Ba', 'Be', 'Bh', 'Bi', 'Bk', 'Br', 'Ca', 'Cd',
218
+ 'Ce', 'Cf', 'Cl', 'Cm', 'Cn', 'Co', 'Cr', 'Cs', 'Cu', 'Db', 'Ds', 'Dy', 'Er', 'Es', 'Eu', 'Fe',
219
+ 'Fl', 'Fm', 'Fr', 'Ga', 'Gd', 'Ge', 'He', 'Hf', 'Hg', 'Ho', 'Hs', 'In', 'Ir', 'Kr', 'La', 'Li',
220
+ 'Lr', 'Lu', 'Lv', 'Mc', 'Md', 'Mg', 'Mn', 'Mo', 'Mt', 'Na', 'Nb', 'Nd', 'Ne', 'Nh', 'Ni', 'No',
221
+ 'Np', 'Og', 'Os', 'Pa', 'Pb', 'Pd', 'Pm', 'Po', 'Pr', 'Pt', 'Pu', 'Ra', 'Rb', 'Re', 'Rf', 'Rg',
222
+ 'Rh', 'Rn', 'Ru', 'Sb', 'Sc', 'Se', 'Sg', 'Si', 'Sm', 'Sn', 'Sr', 'Ta', 'Tb', 'Tc', 'Te', 'Th',
223
+ 'Ti', 'Tl', 'Tm', 'Ts', 'Xe', 'Yb', 'Zn', 'Zr'
224
+ ]
225
+ _single_letter_elements = [
226
+ "B", "C", "F", "H", "I", "K", "N", "O", "P", "S", "U", "V", "W", "Y", 'b', 'c', 'n', 'o', 'p', 's'
227
+ ]
228
+ # fmt: on
229
+ all_elements_sorted = sorted(_two_letter_elements + _single_letter_elements, key=lambda x: (-len(x), x))
230
+ elements_pattern_str = "|".join(all_elements_sorted)
231
+
232
+ bracket_atom_pattern_str = r"\[[^\]]+\]"
233
+ other_single_chars_pattern_str = r"[\(\)\.=\-#@\d\$\%\*:\+\-\/\\]"
234
+ smiles_unit_pattern = (
235
+ r"(?:"
236
+ + bracket_atom_pattern_str
237
+ + r"|"
238
+ + elements_pattern_str
239
+ + r"|"
240
+ + other_single_chars_pattern_str
241
+ + r")"
242
+ )
243
+ core_sequence_pattern = rf"(?>{smiles_unit_pattern}){{10,}}"
244
+ constrained_core_sequence_pattern = rf"(?![:.=]){core_sequence_pattern}(?<![:.=])"
245
+
246
+ final_regex_str = rf"({constrained_core_sequence_pattern})"
247
+
248
+ COMPILED_REGEX = re.compile(final_regex_str)
249
+ return COMPILED_REGEX
250
+
251
+ def check_legitimacy_slow(self, candidate: str) -> bool:
252
+ """Check legitimacy with RDKit"""
253
+ if sum(1 for char in candidate if char.encode("UTF-8").isalpha()) < 5:
254
+ return False
255
+
256
+ mol = Chem.MolFromSmiles(candidate)
257
+ if mol is None:
258
+ return False
259
+ else:
260
+ return True
261
+
262
+ def check_legitimacy_fast(self, candidate: str) -> bool:
263
+ """Check legitimacy with hard rules"""
264
+ if sum(1 for char in candidate if char.encode("UTF-8").isalpha()) < 5:
265
+ return False
266
+
267
+ if not self.check_rings_and_brackets(candidate):
268
+ return False
269
+ else:
270
+ return True
271
+
272
+ def check_legitimacy(self, candidate: str) -> bool:
273
+ if RDKIT_AVAILABLE:
274
+ return self.check_legitimacy_slow(candidate)
275
+ else:
276
+ return self.check_legitimacy_fast(candidate)
277
+
278
+ def check_brackets(self, text):
279
+ matches = re.findall(r"\[([^\[\]]*)\]", text)
280
+ for part in matches:
281
+ if "(" in part or ")" in part:
282
+ return False
283
+ if len(part) == 0:
284
+ return False
285
+ if part[0] in elements or part[0] in aromatic_symbols or part[:2] in elements:
286
+ return True
287
+ return True
288
+
289
+ def check_rings_and_brackets(self, text):
290
+ rings = {}
291
+ left_sq_bracket, right_sq_bracket = 0, 0
292
+ left_pt_bracket, right_pt_bracket = 0, 0
293
+ all_lower = True
294
+ digits_cnt = 0
295
+ pos = 0
296
+ while pos < len(text):
297
+ step = 0
298
+ c = text[pos]
299
+ if ord(c) >= 65 and ord(c) <= 90:
300
+ all_lower = False
301
+ if (pos == len(text) - 1 or pos == 0) and c in bonds:
302
+ return False
303
+ if pos > 0 and text[pos - 1] in bonds and text[pos] in bonds:
304
+ return False
305
+ if c == "[":
306
+ step = 1
307
+ left_sq_bracket += 1
308
+ if left_sq_bracket > right_sq_bracket + 1:
309
+ return False
310
+ if pos == len(text) - 1:
311
+ return False
312
+ if "]" not in text[pos + 1 :]:
313
+ return False
314
+ bracket_span = text[pos + 1 : text.find("]")]
315
+
316
+ if self._SQ_BRACKET_BAN_1.search(bracket_span) or self._SQ_BRACKET_BAN_2.search(bracket_span):
317
+ return False
318
+
319
+ matches = re.findall(r"\d+", bracket_span)
320
+ if len(matches) > 2:
321
+ return False
322
+ if c == "]":
323
+ step = 1
324
+ right_sq_bracket += 1
325
+ if right_sq_bracket > left_sq_bracket:
326
+ return False
327
+
328
+ if c == "(":
329
+ step = 1
330
+ left_pt_bracket += 1
331
+ if c == ")":
332
+ step = 1
333
+ right_pt_bracket += 1
334
+ if right_pt_bracket > left_pt_bracket:
335
+ return False
336
+
337
+ if left_sq_bracket == right_sq_bracket:
338
+ if c.isdigit():
339
+ digits_cnt += 1
340
+ step = 1
341
+ if (
342
+ pos == 0
343
+ or (pos == 1 and text[pos - 1] != "%")
344
+ or (pos > 1 and text[pos - 1] != "%" and text[pos - 2] != "%")
345
+ ):
346
+ if c in rings:
347
+ if rings[c] == "unclosed":
348
+ rings[c] = "closed"
349
+ else:
350
+ rings[c] = "unclosed"
351
+ else:
352
+ rings[c] = "unclosed"
353
+ if c == "%":
354
+ if pos >= len(text) - 2 or not text[pos + 1].isdigit() or not text[pos + 2].isdigit():
355
+ return False
356
+ step = 3
357
+ digits_cnt += 1
358
+ num = text[pos + 1 : pos + 3]
359
+ if num in rings:
360
+ if rings[num] == "unclosed":
361
+ rings[num] = "closed"
362
+ else:
363
+ rings[num] = "unclosed"
364
+ else:
365
+ rings[num] = "unclosed"
366
+ if step == 0:
367
+ if (
368
+ pos < len(text) - 1
369
+ and text[pos : pos + 2] in organic_symbols + aromatic_symbols + other_allows
370
+ ):
371
+ step = 2
372
+ elif c in organic_symbols + aromatic_symbols + other_allows:
373
+ step = 1
374
+ else:
375
+ return False
376
+
377
+ if step == 0:
378
+ step = 1
379
+ pos += step
380
+
381
+ if left_sq_bracket != right_sq_bracket or any(v == "unclosed" for v in rings.values()):
382
+ return False
383
+ if all_lower and digits_cnt < 2:
384
+ return False
385
+ return self.check_brackets(text)
386
+
387
+
388
+ @lru_cache
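# Illustrative usage for the detection module above (not a recorded run; the expected
# output is an assumption based on the regex and legitimacy rules defined in this class):
#
#     >>> SmilesCheckModule().re_split("The aldehyde CC1=CC=CC=C1C=O is volatile")
#     ['The aldehyde ', '<SMILES_AUTO_DETECT>', 'CC1=CC=CC=C1C=O', '</SMILES_AUTO_DETECT>', ' is volatile']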
389
+ # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
390
+ def bytes_to_unicode():
391
+ """
392
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
393
+ characters the bpe code barfs on.
394
+
395
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
396
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
397
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
398
+ tables between utf-8 bytes and unicode strings.
399
+ """
400
+ bs = (
401
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
402
+ )
403
+ cs = bs[:]
404
+ n = 0
405
+ for b in range(2**8):
406
+ if b not in bs:
407
+ bs.append(b)
408
+ cs.append(2**8 + n)
409
+ n += 1
410
+ cs = [chr(n) for n in cs]
411
+ return dict(zip(bs, cs))
412
+
413
+
414
+ # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
415
+ def get_pairs(word):
416
+ """
417
+ Return set of symbol pairs in a word.
418
+
419
+ Word is represented as tuple of symbols (symbols being variable-length strings).
420
+ """
421
+ pairs = set()
422
+ prev_char = word[0]
423
+ for char in word[1:]:
424
+ pairs.add((prev_char, char))
425
+ prev_char = char
426
+ return pairs
427
+
428
+
429
+ # @requires(backends=("sentencepiece",))
430
+ class InternS1Tokenizer(PreTrainedTokenizer):
431
+ """
432
+ Construct an InternS1 tokenizer. Based on byte-level Byte-Pair-Encoding.
433
+
434
+ As with GPT2Tokenizer, this tokenizer has been trained to treat spaces like parts of the tokens, so a word will
435
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
436
+
437
+ ```python
438
+ >>> from transformers import AutoTokenizer
439
+
440
+ >>> tokenizer = AutoTokenizer.from_pretrained("InternS1Tokenizer", trust_remote_code=True)
441
+ >>> tokenizer("Hello world")["input_ids"]
442
+ [9707, 1879]
443
+
444
+ >>> tokenizer(" Hello world")["input_ids"]
445
+ [21927, 1879]
446
+ ```
447
+ This is expected.
448
+
449
+ Includes a custom extension to better support domain-specific text tokenization, leveraging separately trained tokenizer models.
450
+
451
+ ```python
452
+ >>> from transformers import AutoTokenizer
453
+
454
+ >>> tokenizer = AutoTokenizer.from_pretrained("InternS1Tokenizer", trust_remote_code=True)
455
+ >>> tokenizer.tokenize("Describe <SMILES>C1=CC=C(C=C1)C=O</SMILES> and CC1=CC=CC=C1C=O")
456
+ ["Describe ", "<SMILES>", "C1=CC=C(C=C1)C=O", "</SMILES>", " and ", "<SMILES_AUTO_DETECT>",
457
+ "CC1=CC=CC=C1C=O", "</SMILES_AUTO_DETECT>"]
458
+ >>> token_ids = tokenizer("Describe <SMILES>C1=CC=C(C=C1)C=O</SMILES> and CC1=CC=CC=C1C=O")["input_ids"]
459
+ >>> token_ids
460
+ [74785, 220, 151925, 151854, 151860, 151698, 151707, 151860, 151690, 151726, 151926, 323, 220, 151672, 151860, 151701, 151860, 151854, 151726]
461
+
462
+ >>> tokenizer.convert_ids_to_tokens(token_ids)
463
+ ['Describe', 'Ġ', '<SMILES>', 'C', '1', '=CC=C(', 'C=C', '1', ')C', '=O', '</SMILES>', 'Ġand', 'Ġ', 'CC', '1', '=CC=CC=C', '1', 'C', '=O']
464
+ ```
465
+
466
+ Users should refer to this superclass [`PreTrainedTokenizer`] for more information regarding those overloaded methods.
467
+
468
+ Args:
469
+ vocab_file (`str`):
470
+ Path to the vocabulary file.
471
+ merges_file (`str`):
472
+ Path to the merges file.
473
+ errors (`str`, *optional*, defaults to `"replace"`):
474
+ Paradigm to follow when decoding bytes to UTF-8. See
475
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
476
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
477
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
478
+ token instead.
479
+ bos_token (`str`, *optional*):
480
+ The beginning of sequence token. Not applicable for this tokenizer.
481
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
482
+ The end of sequence token.
483
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
484
+ The token used for padding, for example when batching sequences of different lengths.
485
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
486
+ Whether or not the model should cleanup the spaces that were added when splitting the input text during the
487
+ tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
488
+ split_special_tokens (`bool`, *optional*, defaults to `False`):
489
+ Whether or not the special tokens should be split during the tokenization process. The default behavior is
490
+ to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then `tokenizer.tokenize("<|endoftext|>") =
491
+ ['<|endoftext|>']. Otherwise, if `split_special_tokens=True`, then `tokenizer.tokenize("<|endoftext|>")` will give `['<',
492
+ '|', 'endo', 'ft', 'ext', '|', '>']`. This argument is only supported for `slow` tokenizers for the moment.
493
+ """
494
+
495
+ vocab_files_names = VOCAB_FILES_NAMES
496
+ model_input_names = ["input_ids", "attention_mask"]
497
+
498
+ def __init__(
499
+ self,
500
+ vocab_file,
501
+ merges_file,
502
+ errors="replace",
503
+ unk_token="<|endoftext|>",
504
+ bos_token=None,
505
+ eos_token="<|endoftext|>",
506
+ pad_token="<|endoftext|>",
507
+ clean_up_tokenization_spaces=False,
508
+ split_special_tokens=False,
509
+ **kwargs,
510
+ ):
511
+ bos_token = (
512
+ AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
513
+ if isinstance(bos_token, str)
514
+ else bos_token
515
+ )
516
+ eos_token = (
517
+ AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
518
+ if isinstance(eos_token, str)
519
+ else eos_token
520
+ )
521
+ unk_token = (
522
+ AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
523
+ if isinstance(unk_token, str)
524
+ else unk_token
525
+ )
526
+ pad_token = (
527
+ AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
528
+ if isinstance(pad_token, str)
529
+ else pad_token
530
+ )
531
+
532
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
533
+ self.encoder = json.load(vocab_handle)
534
+ self.decoder = {v: k for k, v in self.encoder.items()}
535
+ self.errors = errors # how to handle errors in decoding
536
+ self.byte_encoder = bytes_to_unicode()
537
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
538
+ bpe_merges = []
539
+ with open(merges_file, encoding="utf-8") as merges_handle:
540
+ for i, line in enumerate(merges_handle):
541
+ line = line.strip()
542
+ if (i == 0 and line.startswith("#version:")) or not line:
543
+ continue
544
+ bpe_merges.append(tuple(line.split()))
545
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
546
+ # NOTE: the cache can grow without bound and will get really large for long running processes
547
+ # (esp. for texts of language that do not use space between word, e.g. Chinese); technically
548
+ # not a memory leak but appears as one.
549
+ # GPT2Tokenizer has the same problem, so let's be consistent.
550
+ self.cache = {}
551
+
552
+ self.pat = re.compile(PRETOKENIZE_REGEX)
553
+
554
+ if kwargs.get("add_prefix_space", False):
555
+ logger.warning_once(
556
+ f"{self.__class__.__name__} does not support `add_prefix_space`, setting it to True has no effect."
557
+ )
558
+
559
+ super().__init__(
560
+ vocab_file=vocab_file,
561
+ merges_file=merges_file,
562
+ errors=errors,
563
+ unk_token=unk_token,
564
+ bos_token=bos_token,
565
+ eos_token=eos_token,
566
+ pad_token=pad_token,
567
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
568
+ split_special_tokens=split_special_tokens,
569
+ **kwargs,
570
+ )
571
+
572
+ self.prepare_extra_tokenizers(vocab_file)
573
+
574
+ @property
575
+ def vocab_size(self) -> int:
576
+ return len(self.encoder)
577
+
578
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
579
+ def get_vocab(self):
580
+ return dict(self.encoder, **self.added_tokens_encoder)
581
+
582
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
583
+ def bpe(self, token):
584
+ if token in self.cache:
585
+ return self.cache[token]
586
+ word = tuple(token)
587
+ pairs = get_pairs(word)
588
+
589
+ if not pairs:
590
+ return token
591
+
592
+ while True:
593
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
594
+ if bigram not in self.bpe_ranks:
595
+ break
596
+ first, second = bigram
597
+ new_word = []
598
+ i = 0
599
+ while i < len(word):
600
+ try:
601
+ j = word.index(first, i)
602
+ except ValueError:
603
+ new_word.extend(word[i:])
604
+ break
605
+ else:
606
+ new_word.extend(word[i:j])
607
+ i = j
608
+
609
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
610
+ new_word.append(first + second)
611
+ i += 2
612
+ else:
613
+ new_word.append(word[i])
614
+ i += 1
615
+ new_word = tuple(new_word)
616
+ word = new_word
617
+ if len(word) == 1:
618
+ break
619
+ else:
620
+ pairs = get_pairs(word)
621
+ word = " ".join(word)
622
+ self.cache[token] = word
623
+ return word
624
+
625
+ def prepare_extra_tokenizers(self, vocab_file: str) -> None:
626
+ """
627
+ Prepare domain-specific tokenizers.
628
+
629
+ Define variables/maps here which guide domain-specific tokenization later.
630
+ """
631
+ # Load extra tokenizers with SentencePiece model
632
+ dir_name = os.path.dirname(vocab_file)
633
+
634
+ self.sp_model_SMILES = spm.SentencePieceProcessor()
635
+ self.sp_model_SMILES.Load(os.path.join(dir_name, "tokenizer_SMILES.model"))
636
+ self.sp_model_SMILES.offset = self.init_kwargs["offset_SMILES"]
637
+
638
+ self.sp_model_PROT = spm.SentencePieceProcessor()
639
+ self.sp_model_PROT.Load(os.path.join(dir_name, "tokenizer_PROT.model"))
640
+ self.sp_model_PROT.offset = self.init_kwargs["offset_PROT"]
641
+
642
+ self.sp_model_XNA = spm.SentencePieceProcessor()
643
+ self.sp_model_XNA.Load(os.path.join(dir_name, "tokenizer_XNA.model"))
644
+ self.sp_model_XNA.offset = self.init_kwargs["offset_XNA"]
645
+
646
+ base_mapping = {
647
+ "SMILES": self.sp_model_SMILES,
648
+ "protein": self.sp_model_PROT,
649
+ "dna": self.sp_model_XNA,
650
+ "rna": self.sp_model_XNA,
651
+ }
652
+ auto_detect_mapping = {
653
+ "SMILES": self.sp_model_SMILES,
654
+ "PROT": self.sp_model_PROT,
655
+ "XNA": self.sp_model_XNA,
656
+ }
657
+ # Guiding tokens of domain-specific tokenization
658
+ self.ex_begin_mapping = {f"<{key}>": value for key, value in base_mapping.items()}
659
+ self.ex_end_mapping = {f"</{key}>": value for key, value in base_mapping.items()}
660
+ # Transient markers for auto-detection, these tokens will not be assigned token ids
661
+ self.ex_auto_begin_mapping = {f"<{key}_AUTO_DETECT>": value for key, value in auto_detect_mapping.items()}
662
+ self.ex_auto_end_mapping = {f"</{key}_AUTO_DETECT>": value for key, value in auto_detect_mapping.items()}
663
+ # Token markers to prevent unwanted auto-detection
664
+ self.ex_protect_begin_tokens = ["<MOLFORMULA>"]
665
+ self.ex_protect_end_tokens = ["</MOLFORMULA>"]
666
+ # For simplicity
667
+ self.ex_protect_tokens = self.ex_protect_begin_tokens + self.ex_protect_end_tokens
668
+ self.ex_all_begin_mapping = self.ex_begin_mapping | self.ex_auto_begin_mapping
669
+ self.ex_all_end_mapping = self.ex_end_mapping | self.ex_auto_end_mapping
670
+
671
+ # Update encoder & decoder with extra tokenizers
672
+ for tokenizer_name, sp_model in [
673
+ ("SMILES", self.sp_model_SMILES),
674
+ ("PROT", self.sp_model_PROT),
675
+ ("XNA", self.sp_model_XNA),
676
+ ]:
677
+ self.decoder.update(
678
+ {i + sp_model.offset: sp_model.id_to_piece(i) for i in range(sp_model.get_piece_size())}
679
+ )
680
+ # Not really used, only to fill holes in encoder, to keep methods like `add_tokens` working
681
+ self.encoder.update(
682
+ {
683
+ f"<|{tokenizer_name}_{sp_model.id_to_piece(i)}|>": i + sp_model.offset
684
+ for i in range(sp_model.get_piece_size())
685
+ }
686
+ )
687
+
688
+ # protect-tokens should be kept intact temporarily to guide later tokenization
689
+ # they will be segmented later
690
+ for token in self.ex_protect_tokens:
691
+ self.tokens_trie.add(token)
692
+
693
+ self._unk_token = "<unk>" # Fall-back
694
+ self.check_module_list = [SmilesCheckModule(), ProtCheckModule(), XnaCheckModule()]
695
+
696
+ def _pop_logical_sp_token(self, extra_tokenizer_stack: list, mapping_name: str) -> None:
697
+ """Switch tokenizer when it comes to an end sp token"""
698
+ extra_tokenizer = extra_tokenizer_stack.pop()
699
+ if extra_tokenizer != self.ex_all_end_mapping[mapping_name]:
700
+ logger.warning_once(
701
+ f"Encountered incorrect nesting of extra tokenizers: {self.ex_all_end_mapping[mapping_name]} and {extra_tokenizer}"
702
+ )
703
+ logger.warning_once("This may lead to unexpected behaviour of the tokenizer, please check your input.")
704
+
705
+ def tokenize(self, text: TextInput, **kwargs) -> list[str]:
706
+ """
707
+ Converts a string into a sequence of tokens, using the tokenizer.
708
+
709
+ It switches to a domain-specific tokenizer when it encounters extra/logical special tokens.
710
+
711
+ Args:
712
+ text: TextInput
713
+ """
714
+ split_special_tokens = kwargs.pop("split_special_tokens", self.split_special_tokens)
715
+
716
+ text, kwargs = self.prepare_for_tokenization(text, **kwargs)
717
+
718
+ if kwargs:
719
+ logger.warning(f"Keyword arguments {kwargs} not recognized.")
720
+
721
+ if hasattr(self, "do_lower_case") and self.do_lower_case:
722
+ # convert non-special tokens to lowercase. Might be super slow as well?
723
+ escaped_special_toks = [re.escape(s_tok) for s_tok in (self.all_special_tokens)]
724
+ escaped_special_toks += [
725
+ re.escape(s_tok.content)
726
+ for s_tok in (self._added_tokens_decoder.values())
727
+ if not s_tok.special and s_tok.normalized
728
+ ]
729
+ pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
730
+ text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)
731
+
732
+ if split_special_tokens:
733
+ no_split_token = []
734
+ tokens = [text]
735
+ else:
736
+ no_split_token = self._added_tokens_encoder.keys() # don't split on any of the added tokens
737
+ # "This is something<special_token_1> else"
738
+ tokens = self.tokens_trie.split(text)
739
+
740
+ # ["This is something", "<special_token_1>", " else"]
741
+ for i, token in enumerate(tokens):
742
+ if token in no_split_token:
743
+ tok_extended = self._added_tokens_decoder.get(self._added_tokens_encoder[token], None)
744
+ left = tokens[i - 1] if i > 0 else None
745
+ right = tokens[i + 1] if i < len(tokens) - 1 else None
746
+ if isinstance(tok_extended, AddedToken):
747
+ if tok_extended.rstrip and right:
748
+ # A bit counter-intuitive but we strip the left of the string
749
+ # since tok_extended.rstrip means the special token is eating all white spaces on its right
750
+ tokens[i + 1] = right.lstrip()
751
+ # Strip white spaces on the left
752
+ if tok_extended.lstrip and left:
753
+ tokens[i - 1] = left.rstrip() # Opposite here
754
+ if tok_extended.single_word and left and left[-1] != " ":
755
+ tokens[i - 1] += token
756
+ tokens[i] = ""
757
+ elif tok_extended.single_word and right and right[0] != " ":
758
+ tokens[i + 1] = token + tokens[i + 1]
759
+ tokens[i] = ""
760
+ else:
761
+ raise ValueError(
762
+ f"{tok_extended} cannot be tokenized because it was not properly added"
763
+ f" to the tokenizer. This means that it is not an `AddedToken` but a {type(tok_extended)}"
764
+ )
765
+
766
+ # ["This is something", "<special_token_1>", "else"]
767
+ tokenized_text = []
768
+
769
+ # Codes for automatically detecting domain-specific content
770
+ # All parts that have been marked by domain-specific or protection tokens will not be subject to auto detection
771
+ # See transformers/tests/models/intern_s1/test_tokenization_intern_s1.py::test_auto_detection() for more details
772
+ new_tokens = []
773
+ not_split_flag = 0
774
+ for token in tokens:
775
+ if not token:
776
+ continue
777
+ if token in no_split_token or token in self.ex_protect_tokens:
778
+ new_tokens.append(token)
779
+ if token in self.ex_begin_mapping or token in self.ex_protect_begin_tokens:
780
+ not_split_flag += 1 # In case nested sp tokens
781
+ elif token in self.ex_end_mapping or token in self.ex_protect_end_tokens:
782
+ not_split_flag = max(0, not_split_flag - 1)
783
+ else:
784
+ if not_split_flag:
785
+ new_tokens.append(token)
786
+ else:
787
+ for check_module in self.check_module_list:
788
+ token = check_module.re_split(token)
789
+
790
+ new_tokens.extend(token)
791
+ tokens = new_tokens
792
+
793
+ # Use stack to maintain which tokenizer should be used, considering the possibility of nested extra tokenizer
794
+ extra_tokenizer_stack = []
795
+ for token in tokens:
796
+ # Need to skip eventual empty (fully stripped) tokens
797
+ if not token:
798
+ continue
799
+ # protect-tokens are not assigned token ids, should be segmented here
800
+ if token in self.ex_protect_tokens:
801
+ tokenized_text.extend(self._tokenize(token))
802
+ # push tokenizer to stack when encountering begin token
803
+ elif token in self.ex_all_begin_mapping:
804
+ tokenized_text.append(token)
805
+ extra_tokenizer_stack.append(self.ex_all_begin_mapping[token])
806
+ # pop tokenizer from stack when encountering end token
807
+ elif token in self.ex_all_end_mapping:
808
+ tokenized_text.append(token)
809
+ if extra_tokenizer_stack:
810
+ self._pop_logical_sp_token(extra_tokenizer_stack, token)
811
+ # other special tokens
812
+ elif token in no_split_token:
813
+ tokenized_text.append(token)
814
+ else:
815
+ tokenized_text.extend(self._tokenize(token, extra_tokenizer_stack=extra_tokenizer_stack))
816
+
817
+ # ["This", " is", " something", "<special_token_1>", "else"]
818
+ return tokenized_text
819
+
820
+ def _tokenize(self, text, **kwargs):
821
+ """
822
+ Modified from `transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize`.
823
+
824
+ This adaptation supports domain-specific tokenizers.
825
+ """
826
+ extra_tokenizer_stack = kwargs.pop("extra_tokenizer_stack", False)
827
+ if extra_tokenizer_stack:
828
+ tokenized_text = extra_tokenizer_stack[-1].encode(text, out_type=str)
829
+ tokenized_id = extra_tokenizer_stack[-1].encode(text, out_type=int)
830
+ final_tokenized_text = []
831
+ for text_piece, id_piece in zip(tokenized_text, tokenized_id):
832
+ if id_piece == 0:
833
+ final_tokenized_text.extend(self._bpe_tokenize(text_piece))
834
+ else:
835
+ final_tokenized_text.append(text_piece)
836
+ return final_tokenized_text
837
+ else:
838
+ return self._bpe_tokenize(text)
839
+
840
+ def _bpe_tokenize(self, text, **kwargs):
841
+ text = text.replace(
842
+ "▁", " "
843
+ ) # This discrepancy stems from differing whitespace treatment in SentencePiece versus BPE tokenization.
844
+ bpe_tokens = []
845
+ for token in re.findall(self.pat, text):
846
+ token = "".join(
847
+ self.byte_encoder[b] for b in token.encode("utf-8")
848
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
849
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
850
+ return bpe_tokens
851
+
852
+ def convert_tokens_to_ids(self, tokens: Union[str, list[str]]) -> Union[int, list[int]]:
853
+ """
854
+ Modified from `transformers.tokenization_utils.PreTrainedTokenizer.convert_tokens_to_ids`.
855
+
856
+ Converts a token string (or a sequence of tokens) to a single integer id (or a sequence of ids), using the
857
+ vocabulary.
858
+
859
+ This adaptation supports domain-specific tokenizers.
860
+
861
+ Args:
862
+ tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s).
863
+
864
+ Returns:
865
+ `int` or `List[int]`: The token id or list of token ids.
866
+ """
867
+ if tokens is None:
868
+ return None
869
+
870
+ if isinstance(tokens, str):
871
+ return self._convert_token_to_id_with_added_voc(tokens)
872
+
873
+ ids = []
874
+ extra_tokenizer_stack = []
875
+
876
+ for token in tokens:
877
+ if token not in self.ex_auto_begin_mapping and token not in self.ex_auto_end_mapping:
878
+ ids.append(
879
+ self._convert_token_to_id_with_added_voc(token, extra_tokenizer_stack=extra_tokenizer_stack)
880
+ )
881
+ if token in self.ex_all_begin_mapping:
882
+ extra_tokenizer_stack.append(self.ex_all_begin_mapping[token])
883
+ elif token in self.ex_all_end_mapping:
884
+ if extra_tokenizer_stack:
885
+ self._pop_logical_sp_token(extra_tokenizer_stack, token)
886
+ return ids
887
+
888
+ def _convert_token_to_id_with_added_voc(self, token, **kwargs):
889
+ """
890
+ Modified from `transformers.tokenization_utils.PreTrainedTokenizer._convert_token_to_id_with_added_voc`.
891
+
892
+ This adaptation supports domain-specific tokenizers.
893
+ """
894
+ if token is None:
895
+ return None
896
+
897
+ if token in self._added_tokens_encoder:
898
+ return self._added_tokens_encoder[token]
899
+ return self._convert_token_to_id(token, **kwargs)
900
+
901
+ def _convert_token_to_id(self, token, **kwargs):
902
+ """
903
+ Modified from `transformers.tokenization_utils.PreTrainedTokenizer._convert_token_to_id`.
904
+
905
+ Converts a token (str) to an id using the vocab.
906
+
907
+ Falls back to the original tokenizer for out-of-vocabulary tokens.
908
+ """
909
+ extra_tokenizer_stack = kwargs.pop("extra_tokenizer_stack", False)
910
+ if extra_tokenizer_stack:
911
+ token_id = extra_tokenizer_stack[-1].piece_to_id(token)
912
+ if token_id == extra_tokenizer_stack[-1].unk_id():
913
+ return self.encoder.get(token, self.encoder.get(self._unk_token))
914
+ else:
915
+ return token_id + extra_tokenizer_stack[-1].offset
916
+ else:
917
+ return self.encoder.get(token, self.encoder.get(self._unk_token))
918
+
919
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
920
+ def _convert_id_to_token(self, index):
921
+ """Converts an index (integer) in a token (str) using the vocab."""
922
+ return self.decoder.get(index)
923
+
924
+ def convert_tokens_to_string(self, tokens):
925
+ """Converts a sequence of tokens (string) in a single string."""
926
+ text = "".join(tokens)
927
+ text = text.replace(
928
+ "▁", "Ġ"
929
+ ) # This discrepancy stems from differing whitespace treatment in SentencePiece versus BPE tokenization.
930
+ text = text.replace("\n", "Ċ")
931
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
932
+ return text
933
+
934
+ def decode(
935
+ self,
936
+ token_ids,
937
+ skip_special_tokens: bool = False,
938
+ clean_up_tokenization_spaces: Optional[bool] = False,
939
+ spaces_between_special_tokens: bool = False,
940
+ **kwargs,
941
+ ) -> str:
942
+ # `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers
943
+ # and cannot be configured elsewhere, but it should default to False for InternS1Tokenizer
944
+ return super().decode(
945
+ token_ids,
946
+ skip_special_tokens=skip_special_tokens,
947
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
948
+ spaces_between_special_tokens=spaces_between_special_tokens,
949
+ **kwargs,
950
+ )
951
+
952
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
953
+ """
954
+ Modified from `transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary` to support saving custom extension.
955
+ """
956
+ if not os.path.isdir(save_directory):
957
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
958
+ return
959
+ vocab_file = os.path.join(
960
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
961
+ )
962
+ merge_file = os.path.join(
963
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
964
+ )
965
+ sp_model_smiles = os.path.join(
966
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["sp_model_SMILES"]
967
+ )
968
+ sp_model_prot = os.path.join(
969
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["sp_model_PROT"]
970
+ )
971
+ sp_model_xna = os.path.join(
972
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["sp_model_XNA"]
973
+ )
974
+
975
+ with open(vocab_file, "w", encoding="utf-8") as f:
976
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
977
+
978
+ index = 0
979
+ with open(merge_file, "w", encoding="utf-8") as writer:
980
+ writer.write("#version: 0.2\n")
981
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
982
+ if index != token_index:
983
+ logger.warning(
984
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
985
+ " Please check that the tokenizer is not corrupted!"
986
+ )
987
+ index = token_index
988
+ writer.write(" ".join(bpe_tokens) + "\n")
989
+ index += 1
990
+
991
+ with open(sp_model_smiles, "wb") as f:
992
+ f.write(self.sp_model_SMILES.serialized_model_proto())
993
+
994
+ with open(sp_model_prot, "wb") as f:
995
+ f.write(self.sp_model_PROT.serialized_model_proto())
996
+
997
+ with open(sp_model_xna, "wb") as f:
998
+ f.write(self.sp_model_XNA.serialized_model_proto())
999
+
1000
+ return vocab_file, merge_file
1001
+
1002
+ def prepare_for_tokenization(self, text, **kwargs):
1003
+ text = unicodedata.normalize("NFC", text)
1004
+ return (text, kwargs)
1005
+
1006
+
1007
+ __all__ = ["InternS1Tokenizer"]
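
A minimal usage sketch for the tokenizer defined above, assuming this repository (with `tokenization_interns1.py`, the vocab/merges files, and the three `tokenizer_*.model` files) is available at a placeholder path; it simply exercises the tagged and auto-detected SMILES paths described in the class docstring.

```python
# Sketch only: MODEL_PATH is a placeholder for a local checkout of this repo
# (or its Hub id); trust_remote_code pulls in tokenization_interns1.py above.
from transformers import AutoTokenizer

MODEL_PATH = "path/to/this/repo"
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)

text = "Describe <SMILES>C1=CC=C(C=C1)C=O</SMILES> and CC1=CC=CC=C1C=O"

# The tagged span is routed to the SMILES SentencePiece model; the untagged
# molecule should be picked up by the SmilesCheckModule auto-detection.
print(tokenizer.tokenize(text))

ids = tokenizer(text)["input_ids"]
print(ids)
print(tokenizer.decode(ids))
```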
tokenizer_PROT.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1144f52f86f3ca5a29940d69b037e508c05a89e6eedbe42bea641e226b20dbe0
3
+ size 12118
tokenizer_SMILES.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fba1c97da0353ccbffd368ae78e311ccbc762aa5ba74f9aff8bf2ab363c4d37d
3
+ size 14775
tokenizer_XNA.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58fc8bfb2af3dfe936a13dad8a9cb28dab7850b70b358db19605d867c133fb35
3
+ size 15451
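
The three SentencePiece models above are loaded directly by `prepare_extra_tokenizers`; a standalone sketch of inspecting one of them (assuming the `.model` files have been fetched from LFS into the working directory):

```python
# Uses only the sentencepiece calls already present in tokenization_interns1.py.
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("tokenizer_SMILES.model")  # path relative to a local checkout

print(sp.get_piece_size())                           # size of the SMILES sub-vocabulary
print(sp.encode("C1=CC=C(C=C1)C=O", out_type=str))   # piece-level segmentation
```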
tokenizer_config.json ADDED
@@ -0,0 +1,448 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ },
213
+ "151669": {
214
+ "content": "<IMG_CONTEXT>",
215
+ "lstrip": false,
216
+ "normalized": false,
217
+ "rstrip": false,
218
+ "single_word": false,
219
+ "special": true
220
+ },
221
+ "151670": {
222
+ "content": "<img>",
223
+ "lstrip": false,
224
+ "normalized": false,
225
+ "rstrip": false,
226
+ "single_word": false,
227
+ "special": true
228
+ },
229
+ "151671": {
230
+ "content": "</img>",
231
+ "lstrip": false,
232
+ "normalized": false,
233
+ "rstrip": false,
234
+ "single_word": false,
235
+ "special": true
236
+ },
237
+ "151672": {
238
+ "content": "<quad>",
239
+ "lstrip": false,
240
+ "normalized": false,
241
+ "rstrip": false,
242
+ "single_word": false,
243
+ "special": true
244
+ },
245
+ "151673": {
246
+ "content": "</quad>",
247
+ "lstrip": false,
248
+ "normalized": false,
249
+ "rstrip": false,
250
+ "single_word": false,
251
+ "special": true
252
+ },
253
+ "151674": {
254
+ "content": "<ref>",
255
+ "lstrip": false,
256
+ "normalized": false,
257
+ "rstrip": false,
258
+ "single_word": false,
259
+ "special": true
260
+ },
261
+ "151675": {
262
+ "content": "</ref>",
263
+ "lstrip": false,
264
+ "normalized": false,
265
+ "rstrip": false,
266
+ "single_word": false,
267
+ "special": true
268
+ },
269
+ "151676": {
270
+ "content": "<box>",
271
+ "lstrip": false,
272
+ "normalized": false,
273
+ "rstrip": false,
274
+ "single_word": false,
275
+ "special": true
276
+ },
277
+ "151677": {
278
+ "content": "</box>",
279
+ "lstrip": false,
280
+ "normalized": false,
281
+ "rstrip": false,
282
+ "single_word": false,
283
+ "special": true
284
+ },
285
+ "151678": {
286
+ "content": "<|action_start|>",
287
+ "lstrip": false,
288
+ "normalized": false,
289
+ "rstrip": false,
290
+ "single_word": false,
291
+ "special": true
292
+ },
293
+ "151679": {
294
+ "content": "<|action_end|>",
295
+ "lstrip": false,
296
+ "normalized": false,
297
+ "rstrip": false,
298
+ "single_word": false,
299
+ "special": true
300
+ },
301
+ "151680": {
302
+ "content": "<|interpreter|>",
303
+ "lstrip": false,
304
+ "normalized": false,
305
+ "rstrip": false,
306
+ "single_word": false,
307
+ "special": true
308
+ },
309
+ "151681": {
310
+ "content": "<|plugin|>",
311
+ "lstrip": false,
312
+ "normalized": false,
313
+ "rstrip": false,
314
+ "single_word": false,
315
+ "special": true
316
+ },
317
+ "151682": {
318
+ "content": "<video>",
319
+ "lstrip": false,
320
+ "normalized": false,
321
+ "rstrip": false,
322
+ "single_word": false,
323
+ "special": true
324
+ },
325
+ "151683": {
326
+ "content": "<|ts|>",
327
+ "lstrip": false,
328
+ "normalized": false,
329
+ "rstrip": false,
330
+ "single_word": false,
331
+ "special": true
332
+ },
333
+ "151684": {
334
+ "content": "<|/ts|>",
335
+ "lstrip": false,
336
+ "normalized": false,
337
+ "rstrip": false,
338
+ "single_word": false,
339
+ "special": true
340
+ },
341
+ "151685": {
342
+ "content": "<TS_CONTEXT>",
343
+ "lstrip": false,
344
+ "normalized": false,
345
+ "rstrip": false,
346
+ "single_word": false,
347
+ "special": true
348
+ },
349
+ "151686": {
350
+ "content": "<SMILES>",
351
+ "lstrip": false,
352
+ "normalized": false,
353
+ "rstrip": false,
354
+ "single_word": false,
355
+ "special": false
356
+ },
357
+ "151687": {
358
+ "content": "</SMILES>",
359
+ "lstrip": false,
360
+ "normalized": false,
361
+ "rstrip": false,
362
+ "single_word": false,
363
+ "special": false
364
+ },
365
+ "151688": {
366
+ "content": "<protein>",
367
+ "lstrip": false,
368
+ "normalized": false,
369
+ "rstrip": false,
370
+ "single_word": false,
371
+ "special": false
372
+ },
373
+ "151689": {
374
+ "content": "</protein>",
375
+ "lstrip": false,
376
+ "normalized": false,
377
+ "rstrip": false,
378
+ "single_word": false,
379
+ "special": false
380
+ },
381
+ "151690": {
382
+ "content": "<dna>",
383
+ "lstrip": false,
384
+ "normalized": false,
385
+ "rstrip": false,
386
+ "single_word": false,
387
+ "special": false
388
+ },
389
+ "151691": {
390
+ "content": "</dna>",
391
+ "lstrip": false,
392
+ "normalized": false,
393
+ "rstrip": false,
394
+ "single_word": false,
395
+ "special": false
396
+ },
397
+ "151692": {
398
+ "content": "<rna>",
399
+ "lstrip": false,
400
+ "normalized": false,
401
+ "rstrip": false,
402
+ "single_word": false,
403
+ "special": false
404
+ },
405
+ "151693": {
406
+ "content": "</rna>",
407
+ "lstrip": false,
408
+ "normalized": false,
409
+ "rstrip": false,
410
+ "single_word": false,
411
+ "special": false
412
+ }
413
+ },
414
+ "additional_special_tokens": [
415
+ "<|im_start|>",
416
+ "<|im_end|>",
417
+ "<|object_ref_start|>",
418
+ "<|object_ref_end|>",
419
+ "<|box_start|>",
420
+ "<|box_end|>",
421
+ "<|quad_start|>",
422
+ "<|quad_end|>",
423
+ "<|vision_start|>",
424
+ "<|vision_end|>",
425
+ "<|vision_pad|>",
426
+ "<|image_pad|>",
427
+ "<|video_pad|>"
428
+ ],
429
+ "auto_map": {
430
+ "AutoTokenizer": [
431
+ "tokenization_interns1.InternS1Tokenizer",
432
+ "tokenization_interns1.InternS1Tokenizer"
433
+ ]
434
+ },
435
+ "bos_token": "<|im_start|>",
436
+ "clean_up_tokenization_spaces": false,
437
+ "eos_token": "<|im_end|>",
438
+ "errors": "replace",
439
+ "extra_special_tokens": {},
440
+ "model_max_length": 262144,
441
+ "offset_SMILES": 151694,
442
+ "offset_PROT": 152718,
443
+ "offset_XNA": 153742,
444
+ "pad_token": "<|endoftext|>",
445
+ "split_special_tokens": false,
446
+ "tokenizer_class": "InternS1Tokenizer",
447
+ "unk_token": null
448
+ }
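
As a quick cross-check of the `offset_*` entries above against `_convert_token_to_id` in `tokenization_interns1.py`: a domain SentencePiece piece id maps to a global vocabulary id by adding the corresponding offset. A small sketch (the piece ids used are arbitrary examples):

```python
# Offsets copied from tokenizer_config.json above; domain piece ids are shifted
# into the shared id space by simple addition (see _convert_token_to_id).
OFFSETS = {"SMILES": 151694, "PROT": 152718, "XNA": 153742}

def to_global_id(domain: str, sp_piece_id: int) -> int:
    return sp_piece_id + OFFSETS[domain]

assert to_global_id("SMILES", 0) == 151694   # first SMILES piece
assert to_global_id("PROT", 5) == 152723     # arbitrary protein piece
assert to_global_id("XNA", 10) == 153752     # arbitrary DNA/RNA piece
```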
video_preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
1
+ {
2
+ "size": {
3
+ "longest_edge": 25165824,
4
+ "shortest_edge": 4096
5
+ },
6
+ "patch_size": 16,
7
+ "temporal_patch_size": 2,
8
+ "merge_size": 2,
9
+ "image_mean": [
10
+ 0.5,
11
+ 0.5,
12
+ 0.5
13
+ ],
14
+ "image_std": [
15
+ 0.5,
16
+ 0.5,
17
+ 0.5
18
+ ],
19
+ "auto_map": {
20
+ "AutoVideoProcessor": "video_processing_interns1_pro.InternS1ProVideoProcessor"
21
+ }
22
+ }
video_processing_interns1_pro.py ADDED
@@ -0,0 +1,262 @@
1
+ # coding=utf-8
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from typing import Optional, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+
21
+ from transformers.feature_extraction_utils import BatchFeature
22
+ from transformers.image_utils import ChannelDimension, PILImageResampling, SizeDict, get_image_size
23
+ from transformers.processing_utils import Unpack, VideosKwargs
24
+ from transformers.utils import TensorType, add_start_docstrings, logging
25
+ from transformers.video_processing_utils import BASE_VIDEO_PROCESSOR_DOCSTRING, BaseVideoProcessor
26
+ from transformers.video_utils import VideoMetadata, group_videos_by_shape, reorder_videos
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+
32
+ def smart_resize(
33
+ num_frames: int,
34
+ height: int,
35
+ width: int,
36
+ temporal_factor: int = 2,
37
+ factor: int = 32,
38
+ min_pixels: int = 128 * 128,
39
+ max_pixels: int = 16 * 16 * 2 * 2 * 2 * 6144,
40
+ ):
41
+ if num_frames < temporal_factor:
42
+ raise ValueError(f"t:{num_frames} must be larger than temporal_factor:{temporal_factor}")
43
+ if height < factor or width < factor:
44
+ raise ValueError(f"height:{height} or width:{width} must be larger than factor:{factor}")
45
+ elif max(height, width) / min(height, width) > 200:
46
+ raise ValueError(
47
+ f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
48
+ )
49
+ h_bar = round(height / factor) * factor
50
+ w_bar = round(width / factor) * factor
51
+ t_bar = round(num_frames / temporal_factor) * temporal_factor
52
+
53
+ if t_bar * h_bar * w_bar > max_pixels:
54
+ beta = math.sqrt((num_frames * height * width) / max_pixels)
55
+ h_bar = max(factor, math.floor(height / beta / factor) * factor)
56
+ w_bar = max(factor, math.floor(width / beta / factor) * factor)
57
+ elif t_bar * h_bar * w_bar < min_pixels:
58
+ beta = math.sqrt(min_pixels / (num_frames * height * width))
59
+ h_bar = math.ceil(height * beta / factor) * factor
60
+ w_bar = math.ceil(width * beta / factor) * factor
61
+
62
+ return h_bar, w_bar
63
+
64
+
65
+ class InternS1ProProcessorInitKwargs(VideosKwargs, total=False):
66
+ patch_size: int
67
+ temporal_patch_size: int
68
+ merge_size: int
69
+ min_frames: int
70
+ max_frames: int
71
+
72
+
73
+ class InternS1ProVideoProcessor(BaseVideoProcessor):
74
+ resample = PILImageResampling.BICUBIC
75
+ size = {"shortest_edge": 128 * 32 * 32, "longest_edge": 32 * 32 * 768}
76
+ image_mean = [0.5, 0.5, 0.5]
77
+ image_std = [0.5, 0.5, 0.5]
78
+ do_resize = True
79
+ do_rescale = True
80
+ do_normalize = True
81
+ do_convert_rgb = True
82
+ patch_size = 16
83
+ temporal_patch_size = 2
84
+ merge_size = 2
85
+ fps = 2
86
+ min_frames = 4
87
+ max_frames = 768
88
+ do_sample_frames = True
89
+ valid_kwargs = InternS1ProProcessorInitKwargs
90
+ model_input_names = ["pixel_values_videos", "video_grid_thw"]
91
+
92
+ def __init__(self, **kwargs: Unpack[InternS1ProProcessorInitKwargs]):
93
+ super().__init__(**kwargs)
94
+ if self.size is not None and (
95
+ self.size.get("shortest_edge", None) is None or self.size.get("longest_edge", None) is None
96
+ ):
97
+ raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
98
+
99
+ def _further_process_kwargs(
100
+ self,
101
+ size: Optional[SizeDict] = None,
102
+ **kwargs,
103
+ ) -> dict:
104
+ """
105
+ Update kwargs that need further processing before being validated
106
+ Can be overridden by subclasses to customize the processing of kwargs.
107
+ """
108
+ if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
109
+ raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
110
+
111
+ return super()._further_process_kwargs(size=size, **kwargs)
112
+
113
+ def sample_frames(
114
+ self,
115
+ metadata: VideoMetadata,
116
+ num_frames: Optional[int] = None,
117
+ fps: Optional[Union[int, float]] = None,
118
+ **kwargs,
119
+ ):
120
+ """
121
+ Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
122
+ If `fps` is passed along with metadata, `fps` frames per second are sampled uniformty. Arguments `num_frames`
123
+ and `fps` are mutually exclusive.
124
+
125
+ Args:
126
+ video (`torch.Tensor`):
127
+ Video that need to be sampled.
128
+ metadata (`VideoMetadata`):
129
+ Metadata of the video containing information about total duration, fps and total number of frames.
130
+ num_frames (`int`, *optional*):
131
+ Maximum number of frames to sample. Defaults to `self.num_frames`.
132
+ fps (`int` or `float`, *optional*):
133
+ Target frames to sample per second. Defaults to `self.fps`.
134
+ Returns:
135
+ torch.Tensor:
136
+ Sampled video frames.
137
+ """
138
+ if fps is not None and num_frames is not None:
139
+ raise ValueError("`num_frames` and `fps` are mutually exclusive arguments, please use only one!")
140
+
141
+ total_num_frames = metadata.total_num_frames
142
+ fps = fps if fps is not None else self.fps
143
+
144
+ # If num_frames is not given but fps is, calculate num_frames from fps
145
+ if num_frames is None and fps is not None:
146
+ if metadata.fps is None:
147
+ metadata.fps = 24
148
+ logger.warning_once(
149
+ "Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. "
150
+ "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
151
+ )
152
+ num_frames = int(total_num_frames / metadata.fps * fps)
153
+ num_frames = min(max(num_frames, self.min_frames), self.max_frames, total_num_frames)
154
+
155
+ if num_frames is None:
156
+ num_frames = min(max(total_num_frames, self.min_frames), self.max_frames)
157
+
158
+ indices = np.linspace(0, total_num_frames - 1, num_frames).round().astype(int)
159
+
160
+ return indices
161
+
162
+ def _preprocess(
163
+ self,
164
+ videos: list[torch.Tensor],
165
+ do_convert_rgb: bool = True,
166
+ do_resize: bool = True,
167
+ size: Optional[SizeDict] = None,
168
+ interpolation: PILImageResampling = PILImageResampling.BICUBIC,
169
+ do_rescale: bool = True,
170
+ rescale_factor: float = 1 / 255.0,
171
+ do_normalize: bool = True,
172
+ image_mean: Optional[Union[float, list[float]]] = None,
173
+ image_std: Optional[Union[float, list[float]]] = None,
174
+ patch_size: Optional[int] = None,
175
+ temporal_patch_size: Optional[int] = None,
176
+ merge_size: Optional[int] = None,
177
+ return_tensors: Optional[Union[str, TensorType]] = None,
178
+ **kwargs,
179
+ ):
180
+ grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
181
+ resized_videos_grouped = {}
182
+
183
+ for shape, stacked_videos in grouped_videos.items():
184
+ B, T, C, H, W = stacked_videos.shape
185
+ num_frames, height, width = T, H, W
186
+ if do_resize:
187
+ resized_height, resized_width = smart_resize(
188
+ num_frames=num_frames,
189
+ height=height,
190
+ width=width,
191
+ temporal_factor=temporal_patch_size,
192
+ factor=patch_size * merge_size,
193
+ min_pixels=size.shortest_edge,
194
+ max_pixels=size.longest_edge,
195
+ )
196
+ stacked_videos = stacked_videos.view(B * T, C, H, W)
197
+ stacked_videos = self.resize(
198
+ stacked_videos,
199
+ size=SizeDict(height=resized_height, width=resized_width),
200
+ interpolation=interpolation,
201
+ )
202
+ stacked_videos = stacked_videos.view(B, T, C, resized_height, resized_width)
203
+ resized_videos_grouped[shape] = stacked_videos
204
+ resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)
205
+
206
+ # Group videos by size for further processing
207
+ # Needed in case do_resize is False, or resize returns videos with different sizes
208
+ grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
209
+ processed_videos_grouped = {}
210
+ processed_grids = {}
211
+ for shape, stacked_videos in grouped_videos.items():
212
+ resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)
213
+
214
+ # Fused rescale and normalize
215
+ stacked_videos = self.rescale_and_normalize(
216
+ stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
217
+ )
218
+ patches = stacked_videos
219
+
220
+ # Check that videos have `num_frames` divisible by `temporal_patch_size`
221
+ if patches.shape[1] % temporal_patch_size != 0:
222
+ repeats = patches[:, -1:].repeat(1, temporal_patch_size - 1, 1, 1, 1)
223
+ patches = torch.cat([patches, repeats], dim=1)
224
+ batch_size, grid_t, channel = patches.shape[:3]
225
+ grid_t = grid_t // temporal_patch_size
226
+ grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
227
+
228
+ patches = patches.view(
229
+ batch_size,
230
+ grid_t,
231
+ temporal_patch_size,
232
+ channel,
233
+ grid_h // merge_size,
234
+ merge_size,
235
+ patch_size,
236
+ grid_w // merge_size,
237
+ merge_size,
238
+ patch_size,
239
+ )
240
+ patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
241
+ flatten_patches = patches.reshape(
242
+ batch_size,
243
+ grid_t * grid_h * grid_w,
244
+ channel * temporal_patch_size * patch_size * patch_size,
245
+ )
246
+
247
+ processed_videos_grouped[shape] = flatten_patches
248
+ processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
249
+
250
+ processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
251
+ processed_grids = reorder_videos(processed_grids, grouped_videos_index)
252
+ pixel_values_videos = torch.cat(processed_videos, dim=0)
253
+ video_grid_thw = torch.tensor(processed_grids)
254
+ data = {
255
+ "pixel_values_videos": pixel_values_videos,
256
+ "video_grid_thw": video_grid_thw,
257
+ }
258
+
259
+ return BatchFeature(data=data, tensor_type=return_tensors)
260
+
261
+
262
+ __all__ = ["InternS1ProVideoProcessor"]
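
A short sketch of how `smart_resize` above relates to the `video_grid_thw` entries produced by `_preprocess`, assuming a local copy of this file and the class defaults (`patch_size=16`, `merge_size=2`, `temporal_patch_size=2`); the clip shape is an arbitrary example.

```python
# Sketch: one video_grid_thw entry for a hypothetical 16-frame 480x640 clip.
from video_processing_interns1_pro import smart_resize  # assumes this file is importable locally

num_frames, height, width = 16, 480, 640
patch_size, merge_size, temporal_patch_size = 16, 2, 2

# Spatial sizes are rounded to multiples of patch_size * merge_size (32 here),
# then rescaled if the total pixel budget falls outside the configured bounds.
h_bar, w_bar = smart_resize(
    num_frames=num_frames,
    height=height,
    width=width,
    temporal_factor=temporal_patch_size,
    factor=patch_size * merge_size,
)

grid_t = num_frames // temporal_patch_size          # frames are grouped in temporal pairs
grid_h, grid_w = h_bar // patch_size, w_bar // patch_size
print((grid_t, grid_h, grid_w))                     # e.g. (8, 30, 40) for this clip
```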