jiang-cc committed on
Commit 67ef89b · verified · 1 Parent(s): a188f2d

Upload processor

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
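These entries follow the stock Qwen2.5-VL special-token layout. A quick sanity check against the bundled tokenizer (a minimal sketch; the repo id below is a placeholder, not part of this commit):

from transformers import AutoTokenizer

# Placeholder repo id -- substitute the actual path of this repository.
tokenizer = AutoTokenizer.from_pretrained("jiang-cc/yangjian")

# The IDs listed in added_tokens.json should round-trip through the tokenizer.
assert tokenizer.convert_tokens_to_ids("<|image_pad|>") == 151655
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645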
chat_template.jinja ADDED
@@ -0,0 +1,7 @@
1
+ {% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
2
+ You are a helpful assistant.<|im_end|>
3
+ {% endif %}<|im_start|>{{ message['role'] }}
4
+ {% if message['content'] is string %}{{ message['content'] }}<|im_end|>
5
+ {% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
6
+ {% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
7
+ {% endif %}
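This is the standard Qwen2.5-VL chat format: an optional injected system prompt, one <|vision_start|><|image_pad|><|vision_end|> block per image or video, and an <|im_start|>assistant header when add_generation_prompt is set. A minimal rendering sketch (assuming a recent transformers release with apply_chat_template on processors, and the same placeholder repo id as above):

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("jiang-cc/yangjian", trust_remote_code=True)  # placeholder repo id

messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe what changed between the two frames."},
    ]},
]

# Produces the system preamble, one vision block for the image, and the assistant header.
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
print(prompt)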
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
modeling_yangjian.py ADDED
@@ -0,0 +1,709 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import numpy as np
5
+ from typing import Any, Callable, Optional, Union
6
+
7
+ from transformers import Qwen2_5_VLForConditionalGeneration
8
+ from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import (
9
+ Qwen2_5_VisionTransformerPretrainedModel,
10
+ Qwen2_5_VLModel,
11
+ Qwen2_5_VLModelOutputWithPast,
12
+ is_torchdynamo_compiling,
13
+ Qwen2RMSNorm,
14
+ Qwen2_5_VLMLP,
15
+ eager_attention_forward,
16
+ ALL_ATTENTION_FUNCTIONS
17
+ )
18
+ from transformers.image_utils import ImageInput
19
+ from transformers.tokenization_utils import TextInput, PreTokenizedInput
20
+ from transformers.video_utils import VideoInput
21
+ from transformers.feature_extraction_utils import BatchFeature
22
+
23
+ from transformers import Qwen2_5_VLProcessor, Qwen2_5_VLConfig
24
+ from transformers.models.qwen2_5_vl.processing_qwen2_5_vl import Qwen2_5_VLProcessorKwargs
25
+
26
+ class YangJianConfig(Qwen2_5_VLConfig):
27
+ model_type = "yangjian"
28
+ def __init__(self, **kwargs):
29
+ super().__init__(**kwargs)
30
+ self.vision_config.compare_token_size = 100
31
+ self.architectures = ["YangJianVLForConditionalGeneration"]
32
+
33
+ class YangJianProcessor(Qwen2_5_VLProcessor):
34
+
35
+ def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
36
+ super().__init__(image_processor, tokenizer, video_processor, chat_template, **kwargs)
37
+ self.compare_token_size = 100 if "compare_token_size" not in kwargs else kwargs["compare_token_size"]
38
+
39
+ def __call__(
40
+ self,
41
+ images: ImageInput = None,
42
+ text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
43
+ videos: VideoInput = None,
44
+ **kwargs,
45
+ ) -> BatchFeature:
46
+ """
47
+ Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
48
+ and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
49
+ the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
50
+ Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
51
+
52
+ Args:
53
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
54
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
55
+ tensor. Both channels-first and channels-last formats are supported.
56
+ text (`str`, `list[str]`, `list[list[str]]`):
57
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
58
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
59
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
60
+ videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
61
+ The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
62
+ tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
63
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
64
+ If set, will return tensors of a particular framework. Acceptable values are:
65
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
66
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
67
+ - `'np'`: Return NumPy `np.ndarray` objects.
68
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
69
+
70
+ Returns:
71
+ [`BatchFeature`]: A [`BatchFeature`] with the following fields:
72
+
73
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
74
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
75
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
76
+ `None`).
77
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
78
+ - **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
79
+ - **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
80
+ - **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
81
+ - **second_per_grid_ts** -- List of video seconds per time grid. Returned when `videos` is not `None`.
82
+ """
83
+ output_kwargs = self._merge_kwargs(
84
+ Qwen2_5_VLProcessorKwargs,
85
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
86
+ **kwargs,
87
+ )
88
+
89
+ image_inputs = videos_inputs = {}
90
+ if images is not None:
91
+ image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
92
+ image_grid_thw = image_inputs["image_grid_thw"]
93
+
94
+ if videos is not None:
95
+ fps = output_kwargs["videos_kwargs"].get("fps", 2.0)
96
+ videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
97
+ video_grid_thw = videos_inputs["video_grid_thw"]
98
+
99
+ if isinstance(fps, (int, float)):
100
+ second_per_grid_ts = [self.video_processor.temporal_patch_size / fps] * len(video_grid_thw)
101
+ elif hasattr(fps, "__len__") and len(fps) == len(video_grid_thw):
102
+ second_per_grid_ts = [self.video_processor.temporal_patch_size / tmp for tmp in fps]
103
+ else:
104
+ raise ValueError(
105
+ f"The length of fps ({len(fps) if hasattr(fps, '__len__') else fps}) must be equal to the length of video_grid_thw ({len(video_grid_thw)}) or fps should be a single number."
106
+ )
107
+ videos_inputs.update({"second_per_grid_ts": second_per_grid_ts})
108
+
109
+ if not isinstance(text, list):
110
+ text = [text]
111
+
112
+ text = text.copy() # below lines change text in-place
113
+ if images is not None:
114
+ merge_length = self.image_processor.merge_size**2
115
+ index = 0
116
+ for i in range(len(text)):
117
+ while self.image_token in text[i]:
118
+ num_image_tokens = image_grid_thw[index].prod() // merge_length
119
+ text[i] = text[i].replace(self.image_token, "<|placeholder|>" * (num_image_tokens + self.compare_token_size), 1)
120
+ index += 1
121
+ text[i] = text[i].replace("<|placeholder|>", self.image_token)
122
+
123
+ if videos is not None:
124
+ merge_length = self.video_processor.merge_size**2
125
+ index = 0
126
+ for i in range(len(text)):
127
+ while self.video_token in text[i]:
128
+ num_video_tokens = video_grid_thw[index].prod() // merge_length
129
+ text[i] = text[i].replace(self.video_token, "<|placeholder|>" * num_video_tokens, 1)
130
+ index += 1
131
+ text[i] = text[i].replace("<|placeholder|>", self.video_token)
132
+
133
+ return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
134
+ return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", None)
135
+ text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
136
+ self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
137
+
138
+ if return_mm_token_type_ids:
139
+ array_ids = np.array(text_inputs["input_ids"])
140
+ mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
141
+ mm_token_type_ids[array_ids == self.image_token_id] = 1
142
+ text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
143
+
144
+ return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
145
+
146
+
147
+ class OptimizedCrossAttention(nn.Module):
148
+ """
149
+ Optimized cross attention modeled on the Qwen2_5_VLVisionAttention structure.
150
+ """
151
+ def __init__(self, config, is_cross_attention=False):
152
+ super().__init__()
153
+ self.config = config
154
+ self.dim = config.hidden_size
155
+ self.num_heads = config.num_heads
156
+ self.head_dim = self.dim // self.num_heads
157
+ self.num_key_value_groups = 1 # typically 1 for cross attention
158
+ self.scaling = self.head_dim**-0.5
159
+ self.attention_dropout = 0.0
160
+ self.is_causal = False # cross attention needs no causal mask
161
+ self.is_cross_attention = is_cross_attention
162
+
163
+ if is_cross_attention:
164
+ # Cross attention: Q comes from one sequence, K and V from another
165
+ self.q_proj = nn.Linear(self.dim, self.dim, bias=True)
166
+ self.kv = nn.Linear(self.dim, self.dim * 2, bias=True) # fused K/V projection
167
+ else:
168
+ # Self attention: Q, K and V come from the same sequence
169
+ self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True) # fused Q/K/V projection
170
+
171
+ self.proj = nn.Linear(self.dim, self.dim, bias=True)
172
+
173
+ def forward(
174
+ self,
175
+ query_states: torch.Tensor,
176
+ key_value_states: Optional[torch.Tensor] = None,
177
+ attention_mask: Optional[torch.Tensor] = None,
178
+ **kwargs,
179
+ ) -> torch.Tensor:
180
+ """
181
+ Args:
182
+ query_states: [seq_len_q, hidden_size] or [batch_size, seq_len_q, hidden_size]
183
+ key_value_states: [seq_len_kv, hidden_size] or [batch_size, seq_len_kv, hidden_size]
184
+ If None, self attention is performed
185
+ """
186
+ # Normalize input dimensions
187
+ if query_states.dim() == 2:
188
+ query_states = query_states.unsqueeze(0) # [1, seq_len_q, hidden_size]
189
+ squeeze_output = True
190
+ else:
191
+ squeeze_output = False
192
+
193
+ batch_size, seq_len_q, _ = query_states.shape
194
+
195
+ if self.is_cross_attention and key_value_states is not None:
196
+ # Cross Attention
197
+ if key_value_states.dim() == 2:
198
+ key_value_states = key_value_states.unsqueeze(0) # [1, seq_len_kv, hidden_size]
199
+
200
+ # Compute Q
201
+ q = self.q_proj(query_states) # [batch_size, seq_len_q, hidden_size]
202
+
203
+ # Compute K and V (fused projection)
204
+ kv = self.kv(key_value_states) # [batch_size, seq_len_kv, hidden_size * 2]
205
+ seq_len_kv = kv.shape[1]
206
+
207
+ # Split K and V
208
+ k, v = kv.reshape(batch_size, seq_len_kv, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4).unbind(0)
209
+ # k, v: [batch_size, num_heads, seq_len_kv, head_dim]
210
+
211
+ # Reshape Q
212
+ q = q.reshape(batch_size, seq_len_q, self.num_heads, self.head_dim).transpose(1, 2)
213
+ # q: [batch_size, num_heads, seq_len_q, head_dim]
214
+
215
+ else:
216
+ # Self Attention
217
+ if key_value_states is None:
218
+ key_value_states = query_states
219
+
220
+ # Fused Q/K/V projection
221
+ qkv = self.qkv(query_states) # [batch_size, seq_len, hidden_size * 3]
222
+
223
+ # Split Q, K and V
224
+ q, k, v = qkv.reshape(batch_size, seq_len_q, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4).unbind(0)
225
+ # q, k, v: [batch_size, num_heads, seq_len, head_dim]
226
+
227
+ # Select the attention implementation
228
+ attention_interface: Callable = eager_attention_forward
229
+ if hasattr(self.config, '_attn_implementation') and self.config._attn_implementation != "eager":
230
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
231
+
232
+ # Run the attention computation
233
+ attn_output, _ = attention_interface(
234
+ self,
235
+ q,
236
+ k,
237
+ v,
238
+ attention_mask=attention_mask,
239
+ dropout=0.0 if not self.training else self.attention_dropout,
240
+ scaling=self.scaling,
241
+ is_causal=False,
242
+ **kwargs,
243
+ )
244
+
245
+ # Reshape the output
246
+ attn_output = attn_output.transpose(1, 2).contiguous() # [batch_size, seq_len_q, num_heads, head_dim]
247
+ attn_output = attn_output.reshape(batch_size, seq_len_q, self.dim) # [batch_size, seq_len_q, hidden_size]
248
+
249
+ # Output projection
250
+ attn_output = self.proj(attn_output)
251
+
252
+ # If the input was 2D, return a 2D output as well
253
+ if squeeze_output:
254
+ attn_output = attn_output.squeeze(0) # [seq_len_q, hidden_size]
255
+
256
+ return attn_output
257
+
258
+
259
+ class YangJianCompareVisualEncoder(nn.Module):
260
+ def __init__(self, config):
261
+ super().__init__()
262
+ self.config = config
263
+ self.hidden_size = config.hidden_size
264
+ self.token_size = getattr(config, "compare_token_size", 100) * (config.spatial_merge_size**2)
265
+
266
+ # Encoder: bidirectional interaction between image features
267
+ # First cross attention: previous attends to current
268
+ self.encoder_cross_attn1 = OptimizedCrossAttention(config, is_cross_attention=True)
269
+ # Second cross attention: current attends to previous
270
+ self.encoder_cross_attn2 = OptimizedCrossAttention(config, is_cross_attention=True)
271
+
272
+ self.encoder_norm1 = Qwen2RMSNorm(self.hidden_size, eps=1e-6)
273
+ self.encoder_norm2 = Qwen2RMSNorm(self.hidden_size, eps=1e-6)
274
+ self.encoder_norm3 = Qwen2RMSNorm(self.hidden_size, eps=1e-6)
275
+ self.encoder_norm4 = Qwen2RMSNorm(self.hidden_size, eps=1e-6)
276
+ self.encoder_mlp1 = Qwen2_5_VLMLP(config)
277
+ self.encoder_mlp2 = Qwen2_5_VLMLP(config)
278
+
279
+ # Decoder: queries interact with the encoded features
280
+ # Learnable query embeddings
281
+ self.query_embeddings = nn.Parameter(
282
+ torch.randn(self.token_size, self.hidden_size) * 0.02
283
+ )
284
+
285
+ # Keep only cross attention for queries to attend to the encoded features
286
+ self.decoder_cross_attn = OptimizedCrossAttention(config, is_cross_attention=True)
287
+
288
+ self.decoder_norm1 = Qwen2RMSNorm(self.hidden_size, eps=1e-6)
289
+ self.decoder_norm2 = Qwen2RMSNorm(self.hidden_size, eps=1e-6)
290
+ self.decoder_mlp = Qwen2_5_VLMLP(config)
291
+
292
+ def _ensure_device_dtype_consistency(self, target_tensor):
293
+ """
294
+ Ensure all submodules live on the target tensor's device and share its dtype.
295
+ """
296
+ device = target_tensor.device
297
+ dtype = target_tensor.dtype
298
+
299
+ # Move the attention modules to the correct device
300
+ self.encoder_cross_attn1 = self.encoder_cross_attn1.to(device=device, dtype=dtype)
301
+ self.encoder_cross_attn2 = self.encoder_cross_attn2.to(device=device, dtype=dtype)
302
+ self.decoder_cross_attn = self.decoder_cross_attn.to(device=device, dtype=dtype)
303
+
304
+ # Move the norm layers to the correct device
305
+ self.encoder_norm1 = self.encoder_norm1.to(device=device, dtype=dtype)
306
+ self.encoder_norm2 = self.encoder_norm2.to(device=device, dtype=dtype)
307
+ self.encoder_norm3 = self.encoder_norm3.to(device=device, dtype=dtype)
308
+ self.encoder_norm4 = self.encoder_norm4.to(device=device, dtype=dtype)
309
+ self.decoder_norm1 = self.decoder_norm1.to(device=device, dtype=dtype)
310
+ self.decoder_norm2 = self.decoder_norm2.to(device=device, dtype=dtype)
311
+
312
+ # Move the MLPs to the correct device
313
+ self.encoder_mlp1 = self.encoder_mlp1.to(device=device, dtype=dtype)
314
+ self.encoder_mlp2 = self.encoder_mlp2.to(device=device, dtype=dtype)
315
+ self.decoder_mlp = self.decoder_mlp.to(device=device, dtype=dtype)
316
+
317
+ def forward(self, images_hidden_states: list) -> list:
318
+ """
319
+ Args:
320
+ images_hidden_states: List of tensor, each tensor has shape [seq_len, hidden_size]
321
+
322
+ Returns:
323
+ List of compare visual embeddings, each has shape [token_size, hidden_size]
324
+ """
325
+ if not images_hidden_states:
326
+ return []
327
+
328
+ # Ensure device and dtype consistency across all components
329
+ self._ensure_device_dtype_consistency(images_hidden_states[0])
330
+
331
+ compare_visual_embeds = []
332
+
333
+ for i in range(len(images_hidden_states)):
334
+ current_hidden_state = images_hidden_states[i] # [seq_len_current, hidden_size]
335
+ previous_hidden_state = images_hidden_states[i-1] if i > 0 else current_hidden_state # [seq_len_prev, hidden_size]
336
+
337
+ # Encoder: bidirectional interaction between image features
338
+ encoded_features = self._encoder_forward(current_hidden_state, previous_hidden_state)
339
+
340
+ # Decoder: queries interact with the encoded features
341
+ compare_visual_embed = self._decoder_forward(encoded_features)
342
+
343
+ compare_visual_embeds.append(compare_visual_embed)
344
+
345
+ return compare_visual_embeds
346
+
347
+ def _encoder_forward(self, current_features, previous_features):
348
+ """
349
+ Encoder: bidirectional interaction between image features
350
+ 1. previous attend to current
351
+ 2. current attend to previous
352
+ """
353
+ # Ensure dtype and device consistency
354
+ device = current_features.device
355
+ dtype = current_features.dtype
356
+ previous_features = previous_features.to(device=device, dtype=dtype)
357
+
358
+ # Step 1: previous attends to current
359
+ residual = previous_features
360
+
361
+ # Layer norm
362
+ previous_normed = self.encoder_norm1(previous_features)
363
+ current_normed1 = self.encoder_norm1(current_features)
364
+
365
+ # Cross attention: previous attend to current
366
+ cross_attn_output1 = self.encoder_cross_attn1(
367
+ query_states=previous_normed,
368
+ key_value_states=current_normed1
369
+ )
370
+
371
+ # Residual connection
372
+ previous_features = residual + cross_attn_output1
373
+
374
+ # MLP for previous features
375
+ residual = previous_features
376
+ mlp_input1 = self.encoder_norm2(previous_features)
377
+ mlp_output1 = self.encoder_mlp1(mlp_input1)
378
+ previous_features = residual + mlp_output1
379
+
380
+ # Step 2: current attends to previous (enhanced)
381
+ residual = current_features
382
+
383
+ # Layer norm
384
+ current_normed2 = self.encoder_norm3(current_features)
385
+ previous_normed2 = self.encoder_norm3(previous_features) # use the enhanced previous features
386
+
387
+ # Cross attention: current attend to previous
388
+ cross_attn_output2 = self.encoder_cross_attn2(
389
+ query_states=current_normed2,
390
+ key_value_states=previous_normed2
391
+ )
392
+
393
+ # Residual connection
394
+ current_features = residual + cross_attn_output2
395
+
396
+ # MLP for current features
397
+ residual = current_features
398
+ mlp_input2 = self.encoder_norm4(current_features)
399
+ mlp_output2 = self.encoder_mlp2(mlp_input2)
400
+ current_features = residual + mlp_output2
401
+
402
+ return current_features
403
+
404
+ def _decoder_forward(self, encoded_features):
405
+ """
406
+ Decoder: queries interact with the encoded features (cross attention only).
407
+ """
408
+ # Get the device and dtype
409
+ device = encoded_features.device
410
+ dtype = encoded_features.dtype
411
+
412
+ # Initialize the queries and ensure device/dtype consistency
413
+ queries = self.query_embeddings.to(device=device, dtype=dtype)
414
+
415
+ # Cross attention: queries attend to encoded features
416
+ residual = queries
417
+ queries_normed = self.decoder_norm1(queries)
418
+ encoded_normed = self.decoder_norm1(encoded_features)
419
+
420
+ cross_attn_output = self.decoder_cross_attn(
421
+ query_states=queries_normed,
422
+ key_value_states=encoded_normed
423
+ )
424
+
425
+ queries = residual + cross_attn_output
426
+
427
+ # MLP
428
+ residual = queries
429
+ mlp_input = self.decoder_norm2(queries)
430
+ mlp_output = self.decoder_mlp(mlp_input)
431
+ queries = residual + mlp_output
432
+
433
+ return queries # [token_size, hidden_size]
434
+
435
+
436
+ # Subclass the components first so they are easy to modify
437
+ class YangJianVisionTransformerPretrainedModel(Qwen2_5_VisionTransformerPretrainedModel):
438
+ def __init__(self, config, *inputs, **kwargs) -> None:
439
+ super().__init__(config, *inputs, **kwargs)
440
+ self.compare_visual_encoder = YangJianCompareVisualEncoder(config)
441
+
442
+ def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
443
+ """
444
+ Args:
445
+ hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
446
+ The final hidden states of the model.
447
+ grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
448
+ The temporal, height and width of feature shape of each image in LLM.
449
+
450
+ Returns:
451
+ `tuple(torch.Tensor, list[torch.Tensor])`: hidden_states and the per-image compare_visual_embeds.
452
+ """
453
+ hidden_states = self.patch_embed(hidden_states)
454
+ rotary_pos_emb = self.rot_pos_emb(grid_thw)
455
+ window_index, cu_window_seqlens = self.get_window_index(grid_thw)
456
+ cu_window_seqlens = torch.tensor(
457
+ cu_window_seqlens,
458
+ device=hidden_states.device,
459
+ dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
460
+ )
461
+ cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
462
+
463
+ seq_len, _ = hidden_states.size()
464
+ hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
465
+ hidden_states = hidden_states[window_index, :, :]
466
+ hidden_states = hidden_states.reshape(seq_len, -1)
467
+ rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
468
+ rotary_pos_emb = rotary_pos_emb[window_index, :, :]
469
+ rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
470
+ emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
471
+ position_embeddings = (emb.cos(), emb.sin())
472
+
473
+ cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
474
+ dim=0,
475
+ # Select dtype based on the following factors:
476
+ # - FA2 requires that cu_seqlens_q must have dtype int32
477
+ # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
478
+ # See https://github.com/huggingface/transformers/pull/34852 for more information
479
+ dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
480
+ )
481
+ cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
482
+
483
+ for layer_num, blk in enumerate(self.blocks):
484
+ if layer_num in self.fullatt_block_indexes:
485
+ cu_seqlens_now = cu_seqlens
486
+ else:
487
+ cu_seqlens_now = cu_window_seqlens
488
+
489
+ attention_mask = self._prepare_attention_mask(hidden_states, cu_seqlens_now)
490
+ hidden_states = blk(
491
+ hidden_states,
492
+ cu_seqlens=cu_seqlens_now,
493
+ position_embeddings=position_embeddings,
494
+ attention_mask=attention_mask,
495
+ **kwargs,
496
+ )
497
+
498
+ split_sizes = grid_thw.prod(-1).tolist()
499
+ splited_hidden_states_before_merger = torch.split(hidden_states, split_sizes)
500
+ compare_visual_embeds = self.compare_visual_encoder(splited_hidden_states_before_merger)
501
+ # compare_visual_embeds = self.merger(compare_visual_embeds)
502
+ for i, embeds in enumerate(compare_visual_embeds):
503
+ compare_visual_embeds[i] = self.merger(embeds)
504
+
505
+ hidden_states = self.merger(hidden_states)
506
+ reverse_indices = torch.argsort(window_index)
507
+ hidden_states = hidden_states[reverse_indices, :]
508
+
509
+ return hidden_states, compare_visual_embeds
510
+
511
+ class YangJianVLModel(Qwen2_5_VLModel):
512
+ def __init__(self, config):
513
+ super().__init__(config)
514
+ self.visual = YangJianVisionTransformerPretrainedModel._from_config(config.vision_config)
515
+ # self.learnable_image_embeddings = nn.Parameter(
516
+ # torch.randn(100, config.hidden_size) * 0.02 # small initialization scale
517
+ # )
518
+
519
+ def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
520
+ """
521
+ Encodes images into continuous embeddings that can be forwarded to the language model.
522
+
523
+ Args:
524
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
525
+ The tensors corresponding to the input images.
526
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
527
+ The temporal, height and width of feature shape of each image in LLM.
528
+ """
529
+ pixel_values = pixel_values.type(self.visual.dtype)
530
+ image_embeds, compare_visual_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
531
+ # Comparison-aware tokens have been appended for each image
532
+ split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size**2).tolist()
533
+ image_embeds = torch.split(image_embeds, split_sizes)
534
+
535
+ # Concatenate the image embeddings with the comparison visual embeddings
536
+ enhanced_image_embeds = []
537
+ for i, embeds in enumerate(image_embeds):
538
+ # Make sure compare_visual_embeds[i] matches embeds in device and dtype
539
+ compare_embed = compare_visual_embeds[i].to(device=embeds.device, dtype=embeds.dtype)
540
+ enhanced_embeds = torch.cat([embeds, compare_embed], dim=0)
541
+ enhanced_image_embeds.append(enhanced_embeds)
542
+
543
+ # image_embeds = torch.cat(enhanced_image_embeds, dim=0)
544
+ return enhanced_image_embeds
545
+
546
+
547
+ def forward(
548
+ self,
549
+ input_ids: torch.LongTensor = None,
550
+ attention_mask: Optional[torch.Tensor] = None,
551
+ position_ids: Optional[torch.LongTensor] = None,
552
+ past_key_values: Optional[list[torch.FloatTensor]] = None,
553
+ inputs_embeds: Optional[torch.FloatTensor] = None,
554
+ use_cache: Optional[bool] = None,
555
+ output_attentions: Optional[bool] = None,
556
+ output_hidden_states: Optional[bool] = None,
557
+ return_dict: Optional[bool] = None,
558
+ pixel_values: Optional[torch.Tensor] = None,
559
+ pixel_values_videos: Optional[torch.FloatTensor] = None,
560
+ image_grid_thw: Optional[torch.LongTensor] = None,
561
+ video_grid_thw: Optional[torch.LongTensor] = None,
562
+ rope_deltas: Optional[torch.LongTensor] = None,
563
+ cache_position: Optional[torch.LongTensor] = None,
564
+ second_per_grid_ts: Optional[torch.Tensor] = None,
565
+ **kwargs,
566
+ ) -> Union[tuple, Qwen2_5_VLModelOutputWithPast]:
567
+ r"""
568
+ pixel_values_videos (`torch.FloatTensor` of shape `(seq_length, num_channels * temporal_size * image_size * image_size)):
569
+ The tensors corresponding to the input videos. Pixel values can be obtained using
570
+ [`AutoImageProcessor`]. See [`Qwen2VLImageProcessor.__call__`] for details. [`Qwen2_5_VLProcessor`] uses
571
+ [`Qwen2VLImageProcessor`] for processing videos.
572
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
573
+ The temporal, height and width of feature shape of each image in LLM.
574
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
575
+ The temporal, height and width of feature shape of each video in LLM.
576
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
577
+ The rope index difference between sequence length and multimodal rope.
578
+ second_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*):
579
+ The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
580
+ """
581
+
582
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
583
+ output_hidden_states = (
584
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
585
+ )
586
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
587
+
588
+ if inputs_embeds is None:
589
+ inputs_embeds = self.get_input_embeddings()(input_ids)
590
+ if pixel_values is not None:
591
+ image_embeds = self.get_image_features(pixel_values, image_grid_thw)
592
+
593
+ # # Add 100 learnable embeddings for each image
594
+ # learnable_embeddings = self.learnable_image_embeddings.to(image_embeds[0].device, image_embeds[0].dtype)
595
+ # enhanced_image_embeds = []
596
+
597
+ # for i, embeds in enumerate(image_embeds):
598
+ # # Add 100 learnable embeddings for each image
599
+ # enhanced_embeds = torch.cat([embeds, learnable_embeddings], dim=0)
600
+ # enhanced_image_embeds.append(enhanced_embeds)
601
+
602
+ image_embeds = torch.cat(image_embeds, dim=0)
603
+ n_image_tokens = (input_ids == self.config.image_token_id).sum()
604
+ n_image_features = image_embeds.shape[0]
605
+ if not is_torchdynamo_compiling() and n_image_tokens != n_image_features:
606
+ raise ValueError(
607
+ f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
608
+ )
609
+
610
+ mask = input_ids == self.config.image_token_id
611
+ mask_unsqueezed = mask.unsqueeze(-1)
612
+ mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
613
+ image_mask = mask_expanded.to(inputs_embeds.device)
614
+
615
+ image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
616
+ inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
617
+
618
+ if pixel_values_videos is not None:
619
+ video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
620
+ video_embeds = torch.cat(video_embeds, dim=0)
621
+ n_video_tokens = (input_ids == self.config.video_token_id).sum()
622
+ n_video_features = video_embeds.shape[0]
623
+ if not is_torchdynamo_compiling() and n_video_tokens != n_video_features:
624
+ raise ValueError(
625
+ f"Video features and video tokens do not match: tokens: {n_video_tokens}, features {n_video_features}"
626
+ )
627
+
628
+ mask = input_ids == self.config.video_token_id
629
+ mask_unsqueezed = mask.unsqueeze(-1)
630
+ mask_expanded = mask_unsqueezed.expand_as(inputs_embeds)
631
+ video_mask = mask_expanded.to(inputs_embeds.device)
632
+
633
+ video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
634
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
635
+
636
+ if position_ids is None:
637
+ attention_mask_tensor = (
638
+ attention_mask if not isinstance(attention_mask, dict) else attention_mask["full_attention"]
639
+ )
640
+ if attention_mask_tensor is not None and attention_mask_tensor.ndim == 4:
641
+ attention_mask_tensor = torch.diagonal(attention_mask_tensor[:, 0], dim1=1, dim2=2)
642
+ attention_mask_tensor = attention_mask_tensor / torch.finfo(attention_mask_tensor.dtype).min
643
+ attention_mask_tensor = (1.0 - attention_mask_tensor).int()
644
+
645
+ # Calculate RoPE index once per generation in the pre-fill stage only.
646
+ # When compiling, we can't check tensor values thus we check only input length
647
+ # It is safe to assume that `length!=1` means we're in pre-fill because compiled
648
+ # models currently cannot do assisted decoding
649
+ prefill_compiled_stage = is_torchdynamo_compiling() and (
650
+ (input_ids is not None and input_ids.shape[1] != 1)
651
+ or (inputs_embeds is not None and inputs_embeds.shape[1] != 1)
652
+ )
653
+ prefill_noncompiled_stage = not is_torchdynamo_compiling() and (
654
+ (cache_position is not None and cache_position[0] == 0)
655
+ or (past_key_values is None or past_key_values.get_seq_length() == 0)
656
+ )
657
+ if (prefill_compiled_stage or prefill_noncompiled_stage) or self.rope_deltas is None:
658
+ position_ids, rope_deltas = self.get_rope_index(
659
+ input_ids,
660
+ image_grid_thw,
661
+ video_grid_thw,
662
+ second_per_grid_ts=second_per_grid_ts,
663
+ attention_mask=attention_mask_tensor,
664
+ )
665
+ self.rope_deltas = rope_deltas
666
+ # then use the prev pre-calculated rope-deltas to get the correct position ids
667
+ else:
668
+ batch_size, seq_length, _ = inputs_embeds.shape
669
+ delta = (
670
+ (cache_position[0] + self.rope_deltas).to(inputs_embeds.device)
671
+ if cache_position is not None
672
+ else 0
673
+ )
674
+ position_ids = torch.arange(seq_length, device=inputs_embeds.device)
675
+ position_ids = position_ids.view(1, -1).expand(batch_size, -1)
676
+ if cache_position is not None: # otherwise `deltas` is an int `0`
677
+ delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
678
+ position_ids = position_ids.add(delta)
679
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
680
+
681
+ outputs = self.language_model(
682
+ input_ids=None,
683
+ position_ids=position_ids,
684
+ attention_mask=attention_mask,
685
+ past_key_values=past_key_values,
686
+ inputs_embeds=inputs_embeds,
687
+ use_cache=use_cache,
688
+ output_attentions=output_attentions,
689
+ output_hidden_states=output_hidden_states,
690
+ return_dict=True,
691
+ cache_position=cache_position,
692
+ **kwargs,
693
+ )
694
+
695
+ output = Qwen2_5_VLModelOutputWithPast(
696
+ last_hidden_state=outputs.last_hidden_state,
697
+ past_key_values=outputs.past_key_values,
698
+ hidden_states=outputs.hidden_states,
699
+ attentions=outputs.attentions,
700
+ rope_deltas=self.rope_deltas,
701
+ )
702
+ return output if return_dict else output.to_tuple()
703
+
704
+ class YangJianVLForConditionalGeneration(Qwen2_5_VLForConditionalGeneration):
705
+ config_class = YangJianConfig
706
+
707
+ def __init__(self, config):
708
+ super().__init__(config)
709
+ self.model = YangJianVLModel(config)
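Taken together, YangJianProcessor reserves compare_token_size (default 100) extra <|image_pad|> positions per image, and YangJianVLModel.get_image_features fills them with the comparison queries produced by YangJianCompareVisualEncoder, concatenated after the regular merged patch tokens. A minimal processor-side sketch of that bookkeeping (placeholder repo id; assumes trust_remote_code=True so the auto_map entries in the configs below resolve to this file):

from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("jiang-cc/yangjian", trust_remote_code=True)  # placeholder repo id

image = Image.new("RGB", (448, 448))
text = "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>What changed?<|im_end|>\n<|im_start|>assistant\n"
inputs = processor(images=[image], text=[text], return_tensors="pt")

# A 448x448 image gives a (1, 32, 32) patch grid: 1024 patches / merge_size**2 = 256 visual tokens,
# plus the 100 compare tokens appended by YangJianProcessor.__call__, i.e. 356 <|image_pad|> slots.
image_pad_id = processor.tokenizer.convert_tokens_to_ids("<|image_pad|>")
print(int((inputs["input_ids"] == image_pad_id).sum()))  # expected 356 under these assumptions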
preprocessor_config.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "modeling_yangjian.YangJianProcessor"
4
+ },
5
+ "do_convert_rgb": true,
6
+ "do_normalize": true,
7
+ "do_rescale": true,
8
+ "do_resize": true,
9
+ "image_mean": [
10
+ 0.48145466,
11
+ 0.4578275,
12
+ 0.40821073
13
+ ],
14
+ "image_processor_type": "Qwen2VLImageProcessor",
15
+ "image_std": [
16
+ 0.26862954,
17
+ 0.26130258,
18
+ 0.27577711
19
+ ],
20
+ "max_pixels": 12845056,
21
+ "merge_size": 2,
22
+ "min_pixels": 3136,
23
+ "patch_size": 14,
24
+ "processor_class": "YangJianProcessor",
25
+ "resample": 3,
26
+ "rescale_factor": 0.00392156862745098,
27
+ "size": {
28
+ "longest_edge": 12845056,
29
+ "shortest_edge": 3136
30
+ },
31
+ "temporal_patch_size": 2
32
+ }
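The pixel bounds above map directly onto visual-token counts: after the 2x2 merge, each token covers a (patch_size * merge_size)**2 pixel area. A small illustration of that arithmetic using the values in this config:

patch_size, merge_size = 14, 2
min_pixels, max_pixels = 3136, 12845056

pixels_per_token = (patch_size * merge_size) ** 2  # 784 pixels per merged visual token

print(min_pixels // pixels_per_token)  # 4: tokens for the smallest allowed image
print(max_pixels // pixels_per_token)  # 16384: tokens for the largest allowed image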
processor_config.json ADDED
@@ -0,0 +1,6 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "modeling_yangjian.YangJianProcessor"
4
+ },
5
+ "processor_class": "YangJianProcessor"
6
+ }
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba0c439f7be467bf47d12a7e6f9adc6116201056fc60c67f431c679b7c16afc8
3
+ size 11422064
tokenizer_config.json ADDED
@@ -0,0 +1,215 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "auto_map": {
198
+ "AutoProcessor": "modeling_yangjian.YangJianProcessor"
199
+ },
200
+ "bos_token": null,
201
+ "clean_up_tokenization_spaces": false,
202
+ "eos_token": "<|im_end|>",
203
+ "errors": "replace",
204
+ "extra_special_tokens": {},
205
+ "max_length": null,
206
+ "model_max_length": 131072,
207
+ "pad_to_multiple_of": null,
208
+ "pad_token": "<|endoftext|>",
209
+ "pad_token_type_id": 0,
210
+ "padding_side": "right",
211
+ "processor_class": "YangJianProcessor",
212
+ "split_special_tokens": false,
213
+ "tokenizer_class": "Qwen2Tokenizer",
214
+ "unk_token": null
215
+ }
video_preprocessor_config.json ADDED
@@ -0,0 +1,46 @@
1
+ {
2
+ "auto_map": {
3
+ "AutoProcessor": "modeling_yangjian.YangJianProcessor"
4
+ },
5
+ "crop_size": null,
6
+ "data_format": "channels_first",
7
+ "default_to_square": true,
8
+ "device": null,
9
+ "do_center_crop": null,
10
+ "do_convert_rgb": true,
11
+ "do_normalize": true,
12
+ "do_pad": null,
13
+ "do_rescale": true,
14
+ "do_resize": true,
15
+ "do_sample_frames": false,
16
+ "fps": null,
17
+ "image_mean": [
18
+ 0.48145466,
19
+ 0.4578275,
20
+ 0.40821073
21
+ ],
22
+ "image_std": [
23
+ 0.26862954,
24
+ 0.26130258,
25
+ 0.27577711
26
+ ],
27
+ "input_data_format": null,
28
+ "max_frames": 768,
29
+ "max_pixels": 12845056,
30
+ "merge_size": 2,
31
+ "min_frames": 4,
32
+ "min_pixels": 3136,
33
+ "num_frames": null,
34
+ "patch_size": 14,
35
+ "processor_class": "YangJianProcessor",
36
+ "resample": 3,
37
+ "rescale_factor": 0.00392156862745098,
38
+ "size": {
39
+ "longest_edge": 12845056,
40
+ "shortest_edge": 3136
41
+ },
42
+ "size_divisor": null,
43
+ "temporal_patch_size": 2,
44
+ "video_metadata": null,
45
+ "video_processor_type": "Qwen2VLVideoProcessor"
46
+ }
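These video settings feed the second_per_grid_ts computation in YangJianProcessor.__call__: each temporal grid position spans temporal_patch_size sampled frames. A small sketch of that relationship, using the default fps the processor falls back to:

temporal_patch_size = 2  # from the config above
fps = 2.0                # default used by YangJianProcessor.__call__ when no fps is supplied

second_per_grid = temporal_patch_size / fps
print(second_per_grid)   # 1.0 second of video per temporal grid position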
vocab.json ADDED
The diff for this file is too large to render. See raw diff