chaitnya26 littlebird13 committed on
Commit 372a929 · verified · 0 Parent(s)

Duplicate from Qwen/Qwen-Image-Edit


Co-authored-by: cheng <[email protected]>

Files changed (39)
  1. .gitattributes +36 -0
  2. README.md +137 -0
  3. model_index.json +28 -0
  4. processor/added_tokens.json +24 -0
  5. processor/chat_template.jinja +7 -0
  6. processor/merges.txt +0 -0
  7. processor/preprocessor_config.json +37 -0
  8. processor/special_tokens_map.json +31 -0
  9. processor/tokenizer.json +3 -0
  10. processor/tokenizer_config.json +208 -0
  11. processor/video_preprocessor_config.json +43 -0
  12. processor/vocab.json +0 -0
  13. scheduler/scheduler_config.json +18 -0
  14. text_encoder/config.json +135 -0
  15. text_encoder/generation_config.json +14 -0
  16. text_encoder/model-00001-of-00004.safetensors +3 -0
  17. text_encoder/model-00002-of-00004.safetensors +3 -0
  18. text_encoder/model-00003-of-00004.safetensors +3 -0
  19. text_encoder/model-00004-of-00004.safetensors +3 -0
  20. text_encoder/model.safetensors.index.json +737 -0
  21. tokenizer/added_tokens.json +24 -0
  22. tokenizer/chat_template.jinja +54 -0
  23. tokenizer/merges.txt +0 -0
  24. tokenizer/special_tokens_map.json +31 -0
  25. tokenizer/tokenizer_config.json +207 -0
  26. tokenizer/vocab.json +0 -0
  27. transformer/config.json +17 -0
  28. transformer/diffusion_pytorch_model-00001-of-00009.safetensors +3 -0
  29. transformer/diffusion_pytorch_model-00002-of-00009.safetensors +3 -0
  30. transformer/diffusion_pytorch_model-00003-of-00009.safetensors +3 -0
  31. transformer/diffusion_pytorch_model-00004-of-00009.safetensors +3 -0
  32. transformer/diffusion_pytorch_model-00005-of-00009.safetensors +3 -0
  33. transformer/diffusion_pytorch_model-00006-of-00009.safetensors +3 -0
  34. transformer/diffusion_pytorch_model-00007-of-00009.safetensors +3 -0
  35. transformer/diffusion_pytorch_model-00008-of-00009.safetensors +3 -0
  36. transformer/diffusion_pytorch_model-00009-of-00009.safetensors +3 -0
  37. transformer/diffusion_pytorch_model.safetensors.index.json +0 -0
  38. vae/config.json +56 -0
  39. vae/diffusion_pytorch_model.safetensors +3 -0
.gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ processor/tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,137 @@
+ ---
+ license: apache-2.0
+ language:
+ - en
+ - zh
+ library_name: diffusers
+ pipeline_tag: image-to-image
+ ---
+ <p align="center">
+ <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" width="400"/>
+ </p>
+ <p align="center">
+ 💜 <a href="https://chat.qwen.ai/"><b>Qwen Chat</b></a>&nbsp&nbsp | &nbsp&nbsp🤗 <a href="https://huggingface.co/Qwen/Qwen-Image-Edit">Hugging Face</a>&nbsp&nbsp | &nbsp&nbsp🤖 <a href="https://modelscope.cn/models/Qwen/Qwen-Image-Edit">ModelScope</a>&nbsp&nbsp | &nbsp&nbsp 📑 <a href="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/Qwen_Image.pdf">Tech Report</a> &nbsp&nbsp | &nbsp&nbsp 📑 <a href="https://qwenlm.github.io/blog/qwen-image-edit/">Blog</a> &nbsp&nbsp
+ <br>
+ 🖥️ <a href="https://huggingface.co/spaces/Qwen/Qwen-Image-Edit">Demo</a>&nbsp&nbsp | &nbsp&nbsp💬 <a href="https://github.com/QwenLM/Qwen-Image/blob/main/assets/wechat.png">WeChat (微信)</a>&nbsp&nbsp | &nbsp&nbsp🫨 <a href="https://discord.gg/CV4E9rpNSD">Discord</a>&nbsp&nbsp| &nbsp&nbsp <a href="https://github.com/QwenLM/Qwen-Image">Github</a>&nbsp&nbsp
+ </p>
+
+ <p align="center">
+ <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_homepage.jpg" width="1600"/>
+ </p>
+
+
+ # Introduction
+ We are excited to introduce Qwen-Image-Edit, the image editing version of Qwen-Image. Built upon our 20B Qwen-Image model, Qwen-Image-Edit successfully extends Qwen-Image’s unique text rendering capabilities to image editing tasks, enabling precise text editing. Furthermore, Qwen-Image-Edit simultaneously feeds the input image into Qwen2.5-VL (for visual semantic control) and the VAE Encoder (for visual appearance control), achieving capabilities in both semantic and appearance editing. To experience the latest model, visit [Qwen Chat](https://qwen.ai) and select the "Image Editing" feature.
+
+ Key Features:
+
+ * **Semantic and Appearance Editing**: Qwen-Image-Edit supports both low-level visual appearance editing (such as adding, removing, or modifying elements while requiring all other regions of the image to remain completely unchanged) and high-level visual semantic editing (such as IP creation, object rotation, and style transfer, allowing overall pixel changes while maintaining semantic consistency).
+ * **Precise Text Editing**: Qwen-Image-Edit supports bilingual (Chinese and English) text editing, allowing direct addition, deletion, and modification of text in images while preserving the original font, size, and style.
+ * **Strong Benchmark Performance**: Evaluations on multiple public benchmarks demonstrate that Qwen-Image-Edit achieves state-of-the-art (SOTA) performance in image editing tasks, establishing it as a powerful foundation model for image editing.
+
+
+
+ ## Quick Start
+
+ Install the latest version of diffusers:
+ ```
+ pip install git+https://github.com/huggingface/diffusers
+ ```
+
+ The following code snippet illustrates how to use the model to edit an input image according to a text prompt:
+
+ ```python
+ import os
+ from PIL import Image
+ import torch
+
+ from diffusers import QwenImageEditPipeline
+
+ pipeline = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit")
+ print("pipeline loaded")
+ pipeline.to(torch.bfloat16)
+ pipeline.to("cuda")
+ pipeline.set_progress_bar_config(disable=None)
+ image = Image.open("./input.png").convert("RGB")
+ prompt = "Change the rabbit's color to purple, with a flash light background."
+ inputs = {
+     "image": image,
+     "prompt": prompt,
+     "generator": torch.manual_seed(0),
+     "true_cfg_scale": 4.0,
+     "negative_prompt": " ",
+     "num_inference_steps": 50,
+ }
+
+ with torch.inference_mode():
+     output = pipeline(**inputs)
+     output_image = output.images[0]
+     output_image.save("output_image_edit.png")
+     print("image saved at", os.path.abspath("output_image_edit.png"))
+
+ ```
+
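Once loaded, the pipeline exposes the sub-models declared in `model_index.json` (included later in this commit). The snippet below is an illustrative sketch, not part of the original model card; it assumes the standard diffusers pipeline attribute layout:

```python
import torch
from diffusers import QwenImageEditPipeline

# Load once; bfloat16 keeps memory usage close to the precision used in the Quick Start snippet.
pipeline = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16)

# As described in the introduction, the input image is routed through two encoders:
#   - pipeline.text_encoder (Qwen2_5_VLForConditionalGeneration) for visual semantic control
#   - pipeline.vae (AutoencoderKLQwenImage) for visual appearance control
# The denoiser itself is pipeline.transformer (QwenImageTransformer2DModel).
for name, component in pipeline.components.items():
    print(f"{name}: {type(component).__name__}")
```

Printing the components simply makes the dual-encoder design from the introduction concrete: the same input image feeds the Qwen2.5-VL encoder for semantics and the VAE encoder for appearance.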
+ ## Showcase
+ One of the highlights of Qwen-Image-Edit lies in its powerful capabilities for semantic and appearance editing. Semantic editing refers to modifying image content while preserving the original visual semantics. To intuitively demonstrate this capability, let's take Qwen's mascot—Capybara—as an example:
+ ![Capybara](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片3.JPG#center)
+ As can be seen, although most pixels in the edited image differ from those in the input image (the leftmost image), the character consistency of Capybara is perfectly preserved. Qwen-Image-Edit's powerful semantic editing capability enables effortless and diverse creation of original IP content.
+ Furthermore, on Qwen Chat, we designed a series of editing prompts centered around the 16 MBTI personality types. Leveraging these prompts, we successfully created a set of MBTI-themed emoji packs based on our mascot Capybara, effortlessly expanding the IP's reach and expression.
+ ![MBTI meme series](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片4.JPG#center)
+ Moreover, novel view synthesis is another key application scenario in semantic editing. As shown in the two example images below, Qwen-Image-Edit can not only rotate objects by 90 degrees, but also perform a full 180-degree rotation, allowing us to directly see the back side of the object:
+ ![Viewpoint transformation 90 degrees](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片12.JPG#center)
+ ![Viewpoint transformation 180 degrees](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片13.JPG#center)
+ Another typical application of semantic editing is style transfer. For instance, given an input portrait, Qwen-Image-Edit can easily transform it into various artistic styles such as Studio Ghibli. This capability holds significant value in applications like virtual avatar creation:
+ ![Style transfer](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片1.JPG#center)
+ In addition to semantic editing, appearance editing is another common image editing requirement. Appearance editing emphasizes keeping certain regions of the image completely unchanged while adding, removing, or modifying specific elements. The image below illustrates a case where a signboard is added to the scene.
+ As shown, Qwen-Image-Edit not only successfully inserts the signboard but also generates a corresponding reflection, demonstrating exceptional attention to detail.
+ ![Adding a signboard](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片6.JPG#center)
+ Below is another interesting example, demonstrating how to remove fine hair strands and other small objects from an image.
+ ![Removing fine strands of hair](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片7.JPG#center)
+ Additionally, the color of a specific letter "n" in the image can be changed to blue, enabling precise editing of particular elements.
+ ![Modifying text color](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片8.JPG#center)
+ Appearance editing also has wide-ranging applications in scenarios such as adjusting a person's background or changing clothing, as the images below demonstrate (a sketch of how such instructions can be phrased follows the examples).
+ ![Modifying backgrounds](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片11.JPG#center)
+ ![Modifying clothing](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片5.JPG#center)
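All of the semantic and appearance edits shown above go through the same call as the Quick Start snippet; only the instruction changes. The sketch below is a minimal, hypothetical illustration of how such instructions might be phrased (the prompts and the input file name are stand-ins, not the actual prompts behind the showcase images):

```python
import torch
from PIL import Image
from diffusers import QwenImageEditPipeline

pipeline = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16).to("cuda")
image = Image.open("./input.png").convert("RGB")  # hypothetical input, as in the Quick Start

# Hypothetical instructions mirroring the two editing modes described above.
appearance_edit = (
    "Add a wooden signboard next to the entrance and keep every other "
    "region of the image exactly unchanged."
)
semantic_edit = "Rotate the object 180 degrees so that its back side faces the viewer."

for i, instruction in enumerate([appearance_edit, semantic_edit]):
    with torch.inference_mode():
        edited = pipeline(
            image=image,
            prompt=instruction,
            true_cfg_scale=4.0,
            negative_prompt=" ",
            num_inference_steps=50,
            generator=torch.manual_seed(0),
        ).images[0]
    edited.save(f"showcase_edit_{i}.png")
```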
+ Another standout feature of Qwen-Image-Edit is its accurate text editing capability, which stems from Qwen-Image's deep expertise in text rendering. The two cases below vividly demonstrate Qwen-Image-Edit's powerful performance in editing English text:
+ ![Editing English text 1](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片15.JPG#center)
+ ![Editing English text 2](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片16.JPG#center)
+ Qwen-Image-Edit can also directly edit Chinese posters, enabling not only modifications to large headline text but also precise adjustments to even small and intricate text elements.
+ ![Editing Chinese posters](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片17.JPG#center)
+ Finally, let's walk through a concrete image editing example to demonstrate how a chained editing approach can progressively correct errors in a calligraphy artwork generated by Qwen-Image:
+ ![Calligraphy artwork](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片18.JPG#center)
+ In this artwork, several Chinese characters contain generation errors. We can leverage Qwen-Image-Edit to correct them step by step. For instance, we can draw bounding boxes on the original image to mark the regions that need correction, instructing Qwen-Image-Edit to fix these specific areas. Here, we want the character "稽" to be correctly written within the red box, and the character "亭" to be accurately rendered in the blue region.
+ ![Correcting characters](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片19.JPG#center)
+ However, in practice, the character "稽" is relatively obscure, and the model fails to correct it in a single step. The lower-right component of "稽" should be "旨" rather than "日". At this point, we can further highlight the "日" portion with a red box, instructing Qwen-Image-Edit to fine-tune this detail and replace it with "旨".
+ ![Fine-tuning character](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片20.JPG#center)
+ Isn't it amazing? With this chained, step-by-step editing approach, we can continuously correct character errors until the desired final result is achieved.
+ ![Final version 1](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片21.JPG#center)
+ ![Final version 2](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片22.JPG#center)
+ ![Final version 3](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片23.JPG#center)
+ ![Final version 4](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片24.JPG#center)
+ ![Final version 5](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/edit_en/幻灯片25.JPG#center)
+ Finally, we have successfully obtained a completely correct calligraphy version of *Lantingji Xu (Orchid Pavilion Preface)*!
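The chained correction procedure described above can also be scripted by feeding each output back in as the next input. The sketch below is a hypothetical illustration built on the Quick Start API; the file name and step-by-step instructions are stand-ins for the box-guided prompts shown in the figures, not the authors' exact prompts:

```python
import torch
from PIL import Image
from diffusers import QwenImageEditPipeline

pipeline = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16).to("cuda")

# Hypothetical chained instructions: each step targets one remaining error,
# and each step starts from the previous step's output image.
steps = [
    "Inside the red box, rewrite the character as 稽.",
    "Inside the blue box, rewrite the character as 亭.",
    "Inside the red box, change the lower-right component of 稽 from 日 to 旨.",
]

image = Image.open("./calligraphy.png").convert("RGB")  # hypothetical input file
for i, instruction in enumerate(steps, start=1):
    with torch.inference_mode():
        image = pipeline(
            image=image,
            prompt=instruction,
            true_cfg_scale=4.0,
            negative_prompt=" ",
            num_inference_steps=50,
            generator=torch.manual_seed(0),
        ).images[0]
    image.save(f"calligraphy_step_{i}.png")  # keep each intermediate for inspection
```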
+ In summary, we hope that Qwen-Image-Edit can further advance the field of image generation, truly lower the technical barriers to visual content creation, and inspire even more innovative applications.
+
+
+ ## License Agreement
+
+ Qwen-Image is licensed under Apache 2.0.
+
+ ## Citation
+
+ We kindly encourage citation of our work if you find it useful.
+
+ ```bibtex
+ @misc{wu2025qwenimagetechnicalreport,
+       title={Qwen-Image Technical Report},
+       author={Chenfei Wu and Jiahao Li and Jingren Zhou and Junyang Lin and Kaiyuan Gao and Kun Yan and Sheng-ming Yin and Shuai Bai and Xiao Xu and Yilei Chen and Yuxiang Chen and Zecheng Tang and Zekai Zhang and Zhengyi Wang and An Yang and Bowen Yu and Chen Cheng and Dayiheng Liu and Deqing Li and Hang Zhang and Hao Meng and Hu Wei and Jingyuan Ni and Kai Chen and Kuan Cao and Liang Peng and Lin Qu and Minggang Wu and Peng Wang and Shuting Yu and Tingkun Wen and Wensen Feng and Xiaoxiao Xu and Yi Wang and Yichang Zhang and Yongqiang Zhu and Yujia Wu and Yuxuan Cai and Zenan Liu},
+       year={2025},
+       eprint={2508.02324},
+       archivePrefix={arXiv},
+       primaryClass={cs.CV},
+       url={https://arxiv.org/abs/2508.02324},
+ }
+ ```
+
+ ## Join Us
+ If you're passionate about fundamental research, we're hiring full-time employees (FTEs) and research interns. Don't wait — reach out to us at [email protected]
model_index.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_class_name": "QwenImageEditPipeline",
+ "_diffusers_version": "0.35.0.dev0",
+ "processor": [
+ "transformers",
+ "Qwen2VLProcessor"
+ ],
+ "scheduler": [
+ "diffusers",
+ "FlowMatchEulerDiscreteScheduler"
+ ],
+ "text_encoder": [
+ "transformers",
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "transformers",
+ "Qwen2Tokenizer"
+ ],
+ "transformer": [
+ "diffusers",
+ "QwenImageTransformer2DModel"
+ ],
+ "vae": [
+ "diffusers",
+ "AutoencoderKLQwenImage"
+ ]
+ }
processor/added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
processor/chat_template.jinja ADDED
@@ -0,0 +1,7 @@
+ {% set image_count = namespace(value=0) %}{% set video_count = namespace(value=0) %}{% for message in messages %}{% if loop.first and message['role'] != 'system' %}<|im_start|>system
+ You are a helpful assistant.<|im_end|>
+ {% endif %}<|im_start|>{{ message['role'] }}
+ {% if message['content'] is string %}{{ message['content'] }}<|im_end|>
+ {% else %}{% for content in message['content'] %}{% if content['type'] == 'image' or 'image' in content or 'image_url' in content %}{% set image_count.value = image_count.value + 1 %}{% if add_vision_id %}Picture {{ image_count.value }}: {% endif %}<|vision_start|><|image_pad|><|vision_end|>{% elif content['type'] == 'video' or 'video' in content %}{% set video_count.value = video_count.value + 1 %}{% if add_vision_id %}Video {{ video_count.value }}: {% endif %}<|vision_start|><|video_pad|><|vision_end|>{% elif 'text' in content %}{{ content['text'] }}{% endif %}{% endfor %}<|im_end|>
+ {% endif %}{% endfor %}{% if add_generation_prompt %}<|im_start|>assistant
+ {% endif %}
processor/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
processor/preprocessor_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+ "crop_size": null,
+ "data_format": "channels_first",
+ "default_to_square": true,
+ "device": null,
+ "disable_grouping": null,
+ "do_center_crop": null,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_processor_type": "Qwen2VLImageProcessorFast",
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "input_data_format": null,
+ "max_pixels": 12845056,
+ "merge_size": 2,
+ "min_pixels": 3136,
+ "patch_size": 14,
+ "processor_class": "Qwen2VLProcessor",
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "return_tensors": null,
+ "size": {
+ "longest_edge": 12845056,
+ "shortest_edge": 3136
+ },
+ "temporal_patch_size": 2
+ }
processor/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
processor/tokenizer.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
3
+ size 11421896
processor/tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|im_end|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 131072,
203
+ "pad_token": "<|endoftext|>",
204
+ "processor_class": "Qwen2VLProcessor",
205
+ "split_special_tokens": false,
206
+ "tokenizer_class": "Qwen2Tokenizer",
207
+ "unk_token": null
208
+ }
processor/video_preprocessor_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+ "crop_size": null,
+ "data_format": "channels_first",
+ "default_to_square": true,
+ "device": null,
+ "do_center_crop": null,
+ "do_convert_rgb": true,
+ "do_normalize": true,
+ "do_pad": null,
+ "do_rescale": true,
+ "do_resize": true,
+ "do_sample_frames": false,
+ "fps": null,
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "input_data_format": null,
+ "max_frames": 768,
+ "max_pixels": 12845056,
+ "merge_size": 2,
+ "min_frames": 4,
+ "min_pixels": 3136,
+ "num_frames": null,
+ "patch_size": 14,
+ "processor_class": "Qwen2VLProcessor",
+ "resample": 3,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "longest_edge": 12845056,
+ "shortest_edge": 3136
+ },
+ "size_divisor": null,
+ "temporal_patch_size": 2,
+ "video_metadata": null,
+ "video_processor_type": "Qwen2VLVideoProcessor"
+ }
processor/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+ "_class_name": "FlowMatchEulerDiscreteScheduler",
+ "_diffusers_version": "0.35.0.dev0",
+ "base_image_seq_len": 256,
+ "base_shift": 0.5,
+ "invert_sigmas": false,
+ "max_image_seq_len": 8192,
+ "max_shift": 0.9,
+ "num_train_timesteps": 1000,
+ "shift": 1.0,
+ "shift_terminal": 0.02,
+ "stochastic_sampling": false,
+ "time_shift_type": "exponential",
+ "use_beta_sigmas": false,
+ "use_dynamic_shifting": true,
+ "use_exponential_sigmas": false,
+ "use_karras_sigmas": false
+ }
text_encoder/config.json ADDED
@@ -0,0 +1,135 @@
1
+ {
2
+ "architectures": [
3
+ "Qwen2_5_VLForConditionalGeneration"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "bos_token_id": 151643,
7
+ "eos_token_id": 151645,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 3584,
10
+ "image_token_id": 151655,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 18944,
13
+ "max_position_embeddings": 128000,
14
+ "max_window_layers": 28,
15
+ "model_type": "qwen2_5_vl",
16
+ "num_attention_heads": 28,
17
+ "num_hidden_layers": 28,
18
+ "num_key_value_heads": 4,
19
+ "rms_norm_eps": 1e-06,
20
+ "rope_scaling": {
21
+ "mrope_section": [
22
+ 16,
23
+ 24,
24
+ 24
25
+ ],
26
+ "rope_type": "default",
27
+ "type": "default"
28
+ },
29
+ "rope_theta": 1000000.0,
30
+ "sliding_window": 32768,
31
+ "text_config": {
32
+ "architectures": [
33
+ "Qwen2_5_VLForConditionalGeneration"
34
+ ],
35
+ "attention_dropout": 0.0,
36
+ "bos_token_id": 151643,
37
+ "eos_token_id": 151645,
38
+ "hidden_act": "silu",
39
+ "hidden_size": 3584,
40
+ "image_token_id": null,
41
+ "initializer_range": 0.02,
42
+ "intermediate_size": 18944,
43
+ "layer_types": [
44
+ "full_attention",
45
+ "full_attention",
46
+ "full_attention",
47
+ "full_attention",
48
+ "full_attention",
49
+ "full_attention",
50
+ "full_attention",
51
+ "full_attention",
52
+ "full_attention",
53
+ "full_attention",
54
+ "full_attention",
55
+ "full_attention",
56
+ "full_attention",
57
+ "full_attention",
58
+ "full_attention",
59
+ "full_attention",
60
+ "full_attention",
61
+ "full_attention",
62
+ "full_attention",
63
+ "full_attention",
64
+ "full_attention",
65
+ "full_attention",
66
+ "full_attention",
67
+ "full_attention",
68
+ "full_attention",
69
+ "full_attention",
70
+ "full_attention",
71
+ "full_attention"
72
+ ],
73
+ "max_position_embeddings": 128000,
74
+ "max_window_layers": 28,
75
+ "model_type": "qwen2_5_vl_text",
76
+ "num_attention_heads": 28,
77
+ "num_hidden_layers": 28,
78
+ "num_key_value_heads": 4,
79
+ "rms_norm_eps": 1e-06,
80
+ "rope_scaling": {
81
+ "mrope_section": [
82
+ 16,
83
+ 24,
84
+ 24
85
+ ],
86
+ "rope_type": "default",
87
+ "type": "default"
88
+ },
89
+ "rope_theta": 1000000.0,
90
+ "sliding_window": null,
91
+ "torch_dtype": "float32",
92
+ "use_cache": true,
93
+ "use_sliding_window": false,
94
+ "video_token_id": null,
95
+ "vision_end_token_id": 151653,
96
+ "vision_start_token_id": 151652,
97
+ "vision_token_id": 151654,
98
+ "vocab_size": 152064
99
+ },
100
+ "tie_word_embeddings": false,
101
+ "torch_dtype": "bfloat16",
102
+ "transformers_version": "4.55.2",
103
+ "use_cache": true,
104
+ "use_sliding_window": false,
105
+ "video_token_id": 151656,
106
+ "vision_config": {
107
+ "depth": 32,
108
+ "fullatt_block_indexes": [
109
+ 7,
110
+ 15,
111
+ 23,
112
+ 31
113
+ ],
114
+ "hidden_act": "silu",
115
+ "hidden_size": 1280,
116
+ "in_channels": 3,
117
+ "in_chans": 3,
118
+ "initializer_range": 0.02,
119
+ "intermediate_size": 3420,
120
+ "model_type": "qwen2_5_vl",
121
+ "num_heads": 16,
122
+ "out_hidden_size": 3584,
123
+ "patch_size": 14,
124
+ "spatial_merge_size": 2,
125
+ "spatial_patch_size": 14,
126
+ "temporal_patch_size": 2,
127
+ "tokens_per_second": 2,
128
+ "torch_dtype": "float32",
129
+ "window_size": 112
130
+ },
131
+ "vision_end_token_id": 151653,
132
+ "vision_start_token_id": 151652,
133
+ "vision_token_id": 151654,
134
+ "vocab_size": 152064
135
+ }
text_encoder/generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.05,
+ "temperature": 0.1,
+ "top_k": 1,
+ "top_p": 0.001,
+ "transformers_version": "4.55.2"
+ }
text_encoder/model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d725335e4ea2399be706469e4b8807716a8fa64bd03468252e9f7acf2415fee4
3
+ size 4968243304
text_encoder/model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1830db6908dcc76df3a71492acbcf2b8cac130114cf1f3c2d9edae8de8c6de3
3
+ size 4991495816
text_encoder/model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09c1807c6d00d7cab94f7db39d4c02ebb8537225ccde383861ac48db97945aa6
3
+ size 4932751040
text_encoder/model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5dd068336d14d45ffb43cef374d286cc6ba9d8741b028f90a7d040d847961f4a
3
+ size 1691924384
text_encoder/model.safetensors.index.json ADDED
@@ -0,0 +1,737 @@
1
+ {
2
+ "metadata": {
3
+ "total_parameters": 8292166656,
4
+ "total_size": 16584333312
5
+ },
6
+ "weight_map": {
7
+ "lm_head.weight": "model-00004-of-00004.safetensors",
8
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
18
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
19
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
20
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
25
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
26
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
27
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
28
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
29
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
30
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
31
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
32
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
33
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
39
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
42
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
43
+ "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
44
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
50
+ "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
51
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
53
+ "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
54
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
56
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
63
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
66
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
67
+ "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
68
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
74
+ "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
75
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
77
+ "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
78
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
80
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
82
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
83
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
85
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
86
+ "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
87
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
88
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
89
+ "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
90
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
91
+ "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
92
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
93
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
94
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
95
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
96
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
97
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
98
+ "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
99
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
100
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
101
+ "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
102
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
103
+ "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
104
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
105
+ "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
106
+ "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
107
+ "model.layers.16.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
108
+ "model.layers.16.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
109
+ "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
110
+ "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
111
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
112
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
113
+ "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
114
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
115
+ "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
116
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
117
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
118
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
119
+ "model.layers.17.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
120
+ "model.layers.17.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
121
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
122
+ "model.layers.17.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
123
+ "model.layers.17.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
124
+ "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
125
+ "model.layers.17.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
126
+ "model.layers.17.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
127
+ "model.layers.17.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
128
+ "model.layers.17.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
129
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
130
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
131
+ "model.layers.18.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
132
+ "model.layers.18.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
133
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
134
+ "model.layers.18.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
135
+ "model.layers.18.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
136
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
137
+ "model.layers.18.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
138
+ "model.layers.18.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
139
+ "model.layers.18.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
140
+ "model.layers.18.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
141
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
142
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
143
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
144
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
145
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
146
+ "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
147
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
148
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
149
+ "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
150
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
151
+ "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
152
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
153
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
154
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
155
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
156
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
157
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
158
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
159
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
160
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
161
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
162
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
163
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
164
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
165
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
166
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
168
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
169
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
170
+ "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
171
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
172
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
173
+ "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
174
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
175
+ "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
176
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
178
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
179
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
180
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
181
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
182
+ "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
183
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
184
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
185
+ "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
186
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
187
+ "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
188
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
189
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
190
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
191
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
192
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
193
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
194
+ "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
195
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
196
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
197
+ "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
198
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
199
+ "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
200
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
201
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
202
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
203
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
204
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
205
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
206
+ "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
207
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
208
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
209
+ "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
210
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
211
+ "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
212
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
213
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
214
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
215
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
216
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
217
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
218
+ "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
219
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
220
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
221
+ "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
222
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
223
+ "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
224
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
225
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
226
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
227
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
228
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
229
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
230
+ "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
231
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
232
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
233
+ "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
234
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
235
+ "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
236
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
237
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
238
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
239
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
240
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
241
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
242
+ "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
243
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
244
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
245
+ "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
246
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
247
+ "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
248
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
249
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
250
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
251
+ "model.layers.27.mlp.gate_proj.weight": "model-00004-of-00004.safetensors",
252
+ "model.layers.27.mlp.up_proj.weight": "model-00004-of-00004.safetensors",
253
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
254
+ "model.layers.27.self_attn.k_proj.bias": "model-00004-of-00004.safetensors",
255
+ "model.layers.27.self_attn.k_proj.weight": "model-00004-of-00004.safetensors",
256
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
257
+ "model.layers.27.self_attn.q_proj.bias": "model-00004-of-00004.safetensors",
258
+ "model.layers.27.self_attn.q_proj.weight": "model-00004-of-00004.safetensors",
259
+ "model.layers.27.self_attn.v_proj.bias": "model-00004-of-00004.safetensors",
260
+ "model.layers.27.self_attn.v_proj.weight": "model-00004-of-00004.safetensors",
261
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
262
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
263
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
264
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
265
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
266
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
267
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
268
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
269
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
270
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
271
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
272
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
273
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
274
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
275
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
276
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
277
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
278
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
279
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
280
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
281
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
282
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
283
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
284
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
285
+ "model.layers.5.input_layernorm.weight": "model-00002-of-00004.safetensors",
286
+ "model.layers.5.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
287
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
288
+ "model.layers.5.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
289
+ "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
290
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
291
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
292
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
293
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
294
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
295
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
296
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
297
+ "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
298
+ "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
299
+ "model.layers.6.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
300
+ "model.layers.6.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
301
+ "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
302
+ "model.layers.6.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
303
+ "model.layers.6.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
304
+ "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
305
+ "model.layers.6.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
306
+ "model.layers.6.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
307
+ "model.layers.6.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
308
+ "model.layers.6.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
309
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
310
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
311
+ "model.layers.7.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
312
+ "model.layers.7.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
313
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
314
+ "model.layers.7.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
315
+ "model.layers.7.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
316
+ "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
317
+ "model.layers.7.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
318
+ "model.layers.7.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
319
+ "model.layers.7.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
320
+ "model.layers.7.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
321
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
322
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
323
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
324
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
325
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
326
+ "model.layers.8.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
327
+ "model.layers.8.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
328
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
329
+ "model.layers.8.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
330
+ "model.layers.8.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
331
+ "model.layers.8.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
332
+ "model.layers.8.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
333
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
334
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
335
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
336
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
337
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
338
+ "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
339
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
340
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
341
+ "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
342
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
343
+ "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
344
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
345
+ "model.norm.weight": "model-00004-of-00004.safetensors",
346
+ "visual.blocks.0.attn.proj.bias": "model-00001-of-00004.safetensors",
347
+ "visual.blocks.0.attn.proj.weight": "model-00001-of-00004.safetensors",
348
+ "visual.blocks.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
349
+ "visual.blocks.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
350
+ "visual.blocks.0.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
351
+ "visual.blocks.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
352
+ "visual.blocks.0.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
353
+ "visual.blocks.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
354
+ "visual.blocks.0.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
355
+ "visual.blocks.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
356
+ "visual.blocks.0.norm1.weight": "model-00001-of-00004.safetensors",
357
+ "visual.blocks.0.norm2.weight": "model-00001-of-00004.safetensors",
358
+ "visual.blocks.1.attn.proj.bias": "model-00001-of-00004.safetensors",
359
+ "visual.blocks.1.attn.proj.weight": "model-00001-of-00004.safetensors",
360
+ "visual.blocks.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
361
+ "visual.blocks.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
362
+ "visual.blocks.1.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
363
+ "visual.blocks.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
364
+ "visual.blocks.1.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
365
+ "visual.blocks.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
366
+ "visual.blocks.1.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
367
+ "visual.blocks.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
368
+ "visual.blocks.1.norm1.weight": "model-00001-of-00004.safetensors",
369
+ "visual.blocks.1.norm2.weight": "model-00001-of-00004.safetensors",
370
+ "visual.blocks.10.attn.proj.bias": "model-00001-of-00004.safetensors",
371
+ "visual.blocks.10.attn.proj.weight": "model-00001-of-00004.safetensors",
372
+ "visual.blocks.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
373
+ "visual.blocks.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
374
+ "visual.blocks.10.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
375
+ "visual.blocks.10.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
376
+ "visual.blocks.10.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
377
+ "visual.blocks.10.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
378
+ "visual.blocks.10.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
379
+ "visual.blocks.10.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
380
+ "visual.blocks.10.norm1.weight": "model-00001-of-00004.safetensors",
381
+ "visual.blocks.10.norm2.weight": "model-00001-of-00004.safetensors",
382
+ "visual.blocks.11.attn.proj.bias": "model-00001-of-00004.safetensors",
383
+ "visual.blocks.11.attn.proj.weight": "model-00001-of-00004.safetensors",
384
+ "visual.blocks.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
385
+ "visual.blocks.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
386
+ "visual.blocks.11.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
387
+ "visual.blocks.11.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
388
+ "visual.blocks.11.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
389
+ "visual.blocks.11.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
390
+ "visual.blocks.11.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
391
+ "visual.blocks.11.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
392
+ "visual.blocks.11.norm1.weight": "model-00001-of-00004.safetensors",
393
+ "visual.blocks.11.norm2.weight": "model-00001-of-00004.safetensors",
394
+ "visual.blocks.12.attn.proj.bias": "model-00001-of-00004.safetensors",
395
+ "visual.blocks.12.attn.proj.weight": "model-00001-of-00004.safetensors",
396
+ "visual.blocks.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
397
+ "visual.blocks.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
398
+ "visual.blocks.12.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
399
+ "visual.blocks.12.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
400
+ "visual.blocks.12.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
401
+ "visual.blocks.12.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
402
+ "visual.blocks.12.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
403
+ "visual.blocks.12.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
404
+ "visual.blocks.12.norm1.weight": "model-00001-of-00004.safetensors",
405
+ "visual.blocks.12.norm2.weight": "model-00001-of-00004.safetensors",
406
+ "visual.blocks.13.attn.proj.bias": "model-00001-of-00004.safetensors",
407
+ "visual.blocks.13.attn.proj.weight": "model-00001-of-00004.safetensors",
408
+ "visual.blocks.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
409
+ "visual.blocks.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
410
+ "visual.blocks.13.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
411
+ "visual.blocks.13.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
412
+ "visual.blocks.13.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
413
+ "visual.blocks.13.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
414
+ "visual.blocks.13.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
415
+ "visual.blocks.13.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
416
+ "visual.blocks.13.norm1.weight": "model-00001-of-00004.safetensors",
417
+ "visual.blocks.13.norm2.weight": "model-00001-of-00004.safetensors",
418
+ "visual.blocks.14.attn.proj.bias": "model-00001-of-00004.safetensors",
419
+ "visual.blocks.14.attn.proj.weight": "model-00001-of-00004.safetensors",
420
+ "visual.blocks.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
421
+ "visual.blocks.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
422
+ "visual.blocks.14.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
423
+ "visual.blocks.14.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
424
+ "visual.blocks.14.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
425
+ "visual.blocks.14.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
426
+ "visual.blocks.14.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
427
+ "visual.blocks.14.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
428
+ "visual.blocks.14.norm1.weight": "model-00001-of-00004.safetensors",
429
+ "visual.blocks.14.norm2.weight": "model-00001-of-00004.safetensors",
430
+ "visual.blocks.15.attn.proj.bias": "model-00001-of-00004.safetensors",
431
+ "visual.blocks.15.attn.proj.weight": "model-00001-of-00004.safetensors",
432
+ "visual.blocks.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
433
+ "visual.blocks.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
434
+ "visual.blocks.15.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
435
+ "visual.blocks.15.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
436
+ "visual.blocks.15.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
437
+ "visual.blocks.15.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
438
+ "visual.blocks.15.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
439
+ "visual.blocks.15.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
440
+ "visual.blocks.15.norm1.weight": "model-00001-of-00004.safetensors",
441
+ "visual.blocks.15.norm2.weight": "model-00001-of-00004.safetensors",
442
+ "visual.blocks.16.attn.proj.bias": "model-00001-of-00004.safetensors",
443
+ "visual.blocks.16.attn.proj.weight": "model-00001-of-00004.safetensors",
444
+ "visual.blocks.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
445
+ "visual.blocks.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
446
+ "visual.blocks.16.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
447
+ "visual.blocks.16.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
448
+ "visual.blocks.16.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
449
+ "visual.blocks.16.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
450
+ "visual.blocks.16.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
451
+ "visual.blocks.16.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
452
+ "visual.blocks.16.norm1.weight": "model-00001-of-00004.safetensors",
453
+ "visual.blocks.16.norm2.weight": "model-00001-of-00004.safetensors",
454
+ "visual.blocks.17.attn.proj.bias": "model-00001-of-00004.safetensors",
455
+ "visual.blocks.17.attn.proj.weight": "model-00001-of-00004.safetensors",
456
+ "visual.blocks.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
457
+ "visual.blocks.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
458
+ "visual.blocks.17.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
459
+ "visual.blocks.17.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
460
+ "visual.blocks.17.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
461
+ "visual.blocks.17.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
462
+ "visual.blocks.17.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
463
+ "visual.blocks.17.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
464
+ "visual.blocks.17.norm1.weight": "model-00001-of-00004.safetensors",
465
+ "visual.blocks.17.norm2.weight": "model-00001-of-00004.safetensors",
466
+ "visual.blocks.18.attn.proj.bias": "model-00001-of-00004.safetensors",
467
+ "visual.blocks.18.attn.proj.weight": "model-00001-of-00004.safetensors",
468
+ "visual.blocks.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
469
+ "visual.blocks.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
470
+ "visual.blocks.18.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
471
+ "visual.blocks.18.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
472
+ "visual.blocks.18.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
473
+ "visual.blocks.18.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
474
+ "visual.blocks.18.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
475
+ "visual.blocks.18.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
476
+ "visual.blocks.18.norm1.weight": "model-00001-of-00004.safetensors",
477
+ "visual.blocks.18.norm2.weight": "model-00001-of-00004.safetensors",
478
+ "visual.blocks.19.attn.proj.bias": "model-00001-of-00004.safetensors",
479
+ "visual.blocks.19.attn.proj.weight": "model-00001-of-00004.safetensors",
480
+ "visual.blocks.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
481
+ "visual.blocks.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
482
+ "visual.blocks.19.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
483
+ "visual.blocks.19.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
484
+ "visual.blocks.19.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
485
+ "visual.blocks.19.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
486
+ "visual.blocks.19.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
487
+ "visual.blocks.19.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
488
+ "visual.blocks.19.norm1.weight": "model-00001-of-00004.safetensors",
489
+ "visual.blocks.19.norm2.weight": "model-00001-of-00004.safetensors",
490
+ "visual.blocks.2.attn.proj.bias": "model-00001-of-00004.safetensors",
491
+ "visual.blocks.2.attn.proj.weight": "model-00001-of-00004.safetensors",
492
+ "visual.blocks.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
493
+ "visual.blocks.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
494
+ "visual.blocks.2.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
495
+ "visual.blocks.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
496
+ "visual.blocks.2.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
497
+ "visual.blocks.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
498
+ "visual.blocks.2.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
499
+ "visual.blocks.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
500
+ "visual.blocks.2.norm1.weight": "model-00001-of-00004.safetensors",
501
+ "visual.blocks.2.norm2.weight": "model-00001-of-00004.safetensors",
502
+ "visual.blocks.20.attn.proj.bias": "model-00001-of-00004.safetensors",
503
+ "visual.blocks.20.attn.proj.weight": "model-00001-of-00004.safetensors",
504
+ "visual.blocks.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
505
+ "visual.blocks.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
506
+ "visual.blocks.20.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
507
+ "visual.blocks.20.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
508
+ "visual.blocks.20.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
509
+ "visual.blocks.20.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
510
+ "visual.blocks.20.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
511
+ "visual.blocks.20.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
512
+ "visual.blocks.20.norm1.weight": "model-00001-of-00004.safetensors",
513
+ "visual.blocks.20.norm2.weight": "model-00001-of-00004.safetensors",
514
+ "visual.blocks.21.attn.proj.bias": "model-00001-of-00004.safetensors",
515
+ "visual.blocks.21.attn.proj.weight": "model-00001-of-00004.safetensors",
516
+ "visual.blocks.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
517
+ "visual.blocks.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
518
+ "visual.blocks.21.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
519
+ "visual.blocks.21.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
520
+ "visual.blocks.21.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
521
+ "visual.blocks.21.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
522
+ "visual.blocks.21.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
523
+ "visual.blocks.21.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
524
+ "visual.blocks.21.norm1.weight": "model-00001-of-00004.safetensors",
525
+ "visual.blocks.21.norm2.weight": "model-00001-of-00004.safetensors",
526
+ "visual.blocks.22.attn.proj.bias": "model-00001-of-00004.safetensors",
527
+ "visual.blocks.22.attn.proj.weight": "model-00001-of-00004.safetensors",
528
+ "visual.blocks.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
529
+ "visual.blocks.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
530
+ "visual.blocks.22.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
531
+ "visual.blocks.22.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
532
+ "visual.blocks.22.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
533
+ "visual.blocks.22.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
534
+ "visual.blocks.22.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
535
+ "visual.blocks.22.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
536
+ "visual.blocks.22.norm1.weight": "model-00001-of-00004.safetensors",
537
+ "visual.blocks.22.norm2.weight": "model-00001-of-00004.safetensors",
538
+ "visual.blocks.23.attn.proj.bias": "model-00001-of-00004.safetensors",
539
+ "visual.blocks.23.attn.proj.weight": "model-00001-of-00004.safetensors",
540
+ "visual.blocks.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
541
+ "visual.blocks.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
542
+ "visual.blocks.23.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
543
+ "visual.blocks.23.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
544
+ "visual.blocks.23.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
545
+ "visual.blocks.23.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
546
+ "visual.blocks.23.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
547
+ "visual.blocks.23.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
548
+ "visual.blocks.23.norm1.weight": "model-00001-of-00004.safetensors",
549
+ "visual.blocks.23.norm2.weight": "model-00001-of-00004.safetensors",
550
+ "visual.blocks.24.attn.proj.bias": "model-00001-of-00004.safetensors",
551
+ "visual.blocks.24.attn.proj.weight": "model-00001-of-00004.safetensors",
552
+ "visual.blocks.24.attn.qkv.bias": "model-00001-of-00004.safetensors",
553
+ "visual.blocks.24.attn.qkv.weight": "model-00001-of-00004.safetensors",
554
+ "visual.blocks.24.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
555
+ "visual.blocks.24.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
556
+ "visual.blocks.24.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
557
+ "visual.blocks.24.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
558
+ "visual.blocks.24.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
559
+ "visual.blocks.24.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
560
+ "visual.blocks.24.norm1.weight": "model-00001-of-00004.safetensors",
561
+ "visual.blocks.24.norm2.weight": "model-00001-of-00004.safetensors",
562
+ "visual.blocks.25.attn.proj.bias": "model-00001-of-00004.safetensors",
563
+ "visual.blocks.25.attn.proj.weight": "model-00001-of-00004.safetensors",
564
+ "visual.blocks.25.attn.qkv.bias": "model-00001-of-00004.safetensors",
565
+ "visual.blocks.25.attn.qkv.weight": "model-00001-of-00004.safetensors",
566
+ "visual.blocks.25.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
567
+ "visual.blocks.25.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
568
+ "visual.blocks.25.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
569
+ "visual.blocks.25.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
570
+ "visual.blocks.25.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
571
+ "visual.blocks.25.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
572
+ "visual.blocks.25.norm1.weight": "model-00001-of-00004.safetensors",
573
+ "visual.blocks.25.norm2.weight": "model-00001-of-00004.safetensors",
574
+ "visual.blocks.26.attn.proj.bias": "model-00001-of-00004.safetensors",
575
+ "visual.blocks.26.attn.proj.weight": "model-00001-of-00004.safetensors",
576
+ "visual.blocks.26.attn.qkv.bias": "model-00001-of-00004.safetensors",
577
+ "visual.blocks.26.attn.qkv.weight": "model-00001-of-00004.safetensors",
578
+ "visual.blocks.26.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
579
+ "visual.blocks.26.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
580
+ "visual.blocks.26.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
581
+ "visual.blocks.26.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
582
+ "visual.blocks.26.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
583
+ "visual.blocks.26.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
584
+ "visual.blocks.26.norm1.weight": "model-00001-of-00004.safetensors",
585
+ "visual.blocks.26.norm2.weight": "model-00001-of-00004.safetensors",
586
+ "visual.blocks.27.attn.proj.bias": "model-00001-of-00004.safetensors",
587
+ "visual.blocks.27.attn.proj.weight": "model-00001-of-00004.safetensors",
588
+ "visual.blocks.27.attn.qkv.bias": "model-00001-of-00004.safetensors",
589
+ "visual.blocks.27.attn.qkv.weight": "model-00001-of-00004.safetensors",
590
+ "visual.blocks.27.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
591
+ "visual.blocks.27.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
592
+ "visual.blocks.27.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
593
+ "visual.blocks.27.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
594
+ "visual.blocks.27.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
595
+ "visual.blocks.27.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
596
+ "visual.blocks.27.norm1.weight": "model-00001-of-00004.safetensors",
597
+ "visual.blocks.27.norm2.weight": "model-00001-of-00004.safetensors",
598
+ "visual.blocks.28.attn.proj.bias": "model-00001-of-00004.safetensors",
599
+ "visual.blocks.28.attn.proj.weight": "model-00001-of-00004.safetensors",
600
+ "visual.blocks.28.attn.qkv.bias": "model-00001-of-00004.safetensors",
601
+ "visual.blocks.28.attn.qkv.weight": "model-00001-of-00004.safetensors",
602
+ "visual.blocks.28.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
603
+ "visual.blocks.28.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
604
+ "visual.blocks.28.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
605
+ "visual.blocks.28.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
606
+ "visual.blocks.28.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
607
+ "visual.blocks.28.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
608
+ "visual.blocks.28.norm1.weight": "model-00001-of-00004.safetensors",
609
+ "visual.blocks.28.norm2.weight": "model-00001-of-00004.safetensors",
610
+ "visual.blocks.29.attn.proj.bias": "model-00001-of-00004.safetensors",
611
+ "visual.blocks.29.attn.proj.weight": "model-00001-of-00004.safetensors",
612
+ "visual.blocks.29.attn.qkv.bias": "model-00001-of-00004.safetensors",
613
+ "visual.blocks.29.attn.qkv.weight": "model-00001-of-00004.safetensors",
614
+ "visual.blocks.29.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
615
+ "visual.blocks.29.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
616
+ "visual.blocks.29.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
617
+ "visual.blocks.29.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
618
+ "visual.blocks.29.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
619
+ "visual.blocks.29.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
620
+ "visual.blocks.29.norm1.weight": "model-00001-of-00004.safetensors",
621
+ "visual.blocks.29.norm2.weight": "model-00001-of-00004.safetensors",
622
+ "visual.blocks.3.attn.proj.bias": "model-00001-of-00004.safetensors",
623
+ "visual.blocks.3.attn.proj.weight": "model-00001-of-00004.safetensors",
624
+ "visual.blocks.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
625
+ "visual.blocks.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
626
+ "visual.blocks.3.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
627
+ "visual.blocks.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
628
+ "visual.blocks.3.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
629
+ "visual.blocks.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
630
+ "visual.blocks.3.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
631
+ "visual.blocks.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
632
+ "visual.blocks.3.norm1.weight": "model-00001-of-00004.safetensors",
633
+ "visual.blocks.3.norm2.weight": "model-00001-of-00004.safetensors",
634
+ "visual.blocks.30.attn.proj.bias": "model-00001-of-00004.safetensors",
635
+ "visual.blocks.30.attn.proj.weight": "model-00001-of-00004.safetensors",
636
+ "visual.blocks.30.attn.qkv.bias": "model-00001-of-00004.safetensors",
637
+ "visual.blocks.30.attn.qkv.weight": "model-00001-of-00004.safetensors",
638
+ "visual.blocks.30.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
639
+ "visual.blocks.30.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
640
+ "visual.blocks.30.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
641
+ "visual.blocks.30.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
642
+ "visual.blocks.30.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
643
+ "visual.blocks.30.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
644
+ "visual.blocks.30.norm1.weight": "model-00001-of-00004.safetensors",
645
+ "visual.blocks.30.norm2.weight": "model-00001-of-00004.safetensors",
646
+ "visual.blocks.31.attn.proj.bias": "model-00001-of-00004.safetensors",
647
+ "visual.blocks.31.attn.proj.weight": "model-00001-of-00004.safetensors",
648
+ "visual.blocks.31.attn.qkv.bias": "model-00001-of-00004.safetensors",
649
+ "visual.blocks.31.attn.qkv.weight": "model-00001-of-00004.safetensors",
650
+ "visual.blocks.31.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
651
+ "visual.blocks.31.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
652
+ "visual.blocks.31.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
653
+ "visual.blocks.31.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
654
+ "visual.blocks.31.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
655
+ "visual.blocks.31.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
656
+ "visual.blocks.31.norm1.weight": "model-00001-of-00004.safetensors",
657
+ "visual.blocks.31.norm2.weight": "model-00001-of-00004.safetensors",
658
+ "visual.blocks.4.attn.proj.bias": "model-00001-of-00004.safetensors",
659
+ "visual.blocks.4.attn.proj.weight": "model-00001-of-00004.safetensors",
660
+ "visual.blocks.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
661
+ "visual.blocks.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
662
+ "visual.blocks.4.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
663
+ "visual.blocks.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
664
+ "visual.blocks.4.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
665
+ "visual.blocks.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
666
+ "visual.blocks.4.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
667
+ "visual.blocks.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
668
+ "visual.blocks.4.norm1.weight": "model-00001-of-00004.safetensors",
669
+ "visual.blocks.4.norm2.weight": "model-00001-of-00004.safetensors",
670
+ "visual.blocks.5.attn.proj.bias": "model-00001-of-00004.safetensors",
671
+ "visual.blocks.5.attn.proj.weight": "model-00001-of-00004.safetensors",
672
+ "visual.blocks.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
673
+ "visual.blocks.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
674
+ "visual.blocks.5.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
675
+ "visual.blocks.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
676
+ "visual.blocks.5.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
677
+ "visual.blocks.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
678
+ "visual.blocks.5.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
679
+ "visual.blocks.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
680
+ "visual.blocks.5.norm1.weight": "model-00001-of-00004.safetensors",
681
+ "visual.blocks.5.norm2.weight": "model-00001-of-00004.safetensors",
682
+ "visual.blocks.6.attn.proj.bias": "model-00001-of-00004.safetensors",
683
+ "visual.blocks.6.attn.proj.weight": "model-00001-of-00004.safetensors",
684
+ "visual.blocks.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
685
+ "visual.blocks.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
686
+ "visual.blocks.6.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
687
+ "visual.blocks.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
688
+ "visual.blocks.6.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
689
+ "visual.blocks.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
690
+ "visual.blocks.6.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
691
+ "visual.blocks.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
692
+ "visual.blocks.6.norm1.weight": "model-00001-of-00004.safetensors",
693
+ "visual.blocks.6.norm2.weight": "model-00001-of-00004.safetensors",
694
+ "visual.blocks.7.attn.proj.bias": "model-00001-of-00004.safetensors",
695
+ "visual.blocks.7.attn.proj.weight": "model-00001-of-00004.safetensors",
696
+ "visual.blocks.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
697
+ "visual.blocks.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
698
+ "visual.blocks.7.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
699
+ "visual.blocks.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
700
+ "visual.blocks.7.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
701
+ "visual.blocks.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
702
+ "visual.blocks.7.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
703
+ "visual.blocks.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
704
+ "visual.blocks.7.norm1.weight": "model-00001-of-00004.safetensors",
705
+ "visual.blocks.7.norm2.weight": "model-00001-of-00004.safetensors",
706
+ "visual.blocks.8.attn.proj.bias": "model-00001-of-00004.safetensors",
707
+ "visual.blocks.8.attn.proj.weight": "model-00001-of-00004.safetensors",
708
+ "visual.blocks.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
709
+ "visual.blocks.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
710
+ "visual.blocks.8.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
711
+ "visual.blocks.8.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
712
+ "visual.blocks.8.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
713
+ "visual.blocks.8.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
714
+ "visual.blocks.8.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
715
+ "visual.blocks.8.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
716
+ "visual.blocks.8.norm1.weight": "model-00001-of-00004.safetensors",
717
+ "visual.blocks.8.norm2.weight": "model-00001-of-00004.safetensors",
718
+ "visual.blocks.9.attn.proj.bias": "model-00001-of-00004.safetensors",
719
+ "visual.blocks.9.attn.proj.weight": "model-00001-of-00004.safetensors",
720
+ "visual.blocks.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
721
+ "visual.blocks.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
722
+ "visual.blocks.9.mlp.down_proj.bias": "model-00001-of-00004.safetensors",
723
+ "visual.blocks.9.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
724
+ "visual.blocks.9.mlp.gate_proj.bias": "model-00001-of-00004.safetensors",
725
+ "visual.blocks.9.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
726
+ "visual.blocks.9.mlp.up_proj.bias": "model-00001-of-00004.safetensors",
727
+ "visual.blocks.9.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
728
+ "visual.blocks.9.norm1.weight": "model-00001-of-00004.safetensors",
729
+ "visual.blocks.9.norm2.weight": "model-00001-of-00004.safetensors",
730
+ "visual.merger.ln_q.weight": "model-00001-of-00004.safetensors",
731
+ "visual.merger.mlp.0.bias": "model-00001-of-00004.safetensors",
732
+ "visual.merger.mlp.0.weight": "model-00001-of-00004.safetensors",
733
+ "visual.merger.mlp.2.bias": "model-00001-of-00004.safetensors",
734
+ "visual.merger.mlp.2.weight": "model-00001-of-00004.safetensors",
735
+ "visual.patch_embed.proj.weight": "model-00001-of-00004.safetensors"
736
+ }
737
+ }
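
The entries above close out text_encoder/model.safetensors.index.json: its weight_map ties every text-encoder parameter name to one of the four safetensors shards, so a loader only has to open the shard that actually holds the tensor it wants. A minimal sketch of that lookup, assuming the repo has been downloaded locally and the safetensors package is installed:

```python
import json
from safetensors import safe_open  # pip install safetensors

INDEX_PATH = "text_encoder/model.safetensors.index.json"  # assumes a local download

def load_single_tensor(name: str):
    # The index's "weight_map" maps each parameter name to the shard holding it.
    with open(INDEX_PATH) as f:
        shard_name = json.load(f)["weight_map"][name]
    # Open only that shard and read just the requested tensor, nothing else.
    with safe_open(f"text_encoder/{shard_name}", framework="pt", device="cpu") as shard:
        return shard.get_tensor(name)

print(load_single_tensor("model.norm.weight").shape)
```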
tokenizer/added_tokens.json ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "</tool_call>": 151658,
3
+ "<tool_call>": 151657,
4
+ "<|box_end|>": 151649,
5
+ "<|box_start|>": 151648,
6
+ "<|endoftext|>": 151643,
7
+ "<|file_sep|>": 151664,
8
+ "<|fim_middle|>": 151660,
9
+ "<|fim_pad|>": 151662,
10
+ "<|fim_prefix|>": 151659,
11
+ "<|fim_suffix|>": 151661,
12
+ "<|im_end|>": 151645,
13
+ "<|im_start|>": 151644,
14
+ "<|image_pad|>": 151655,
15
+ "<|object_ref_end|>": 151647,
16
+ "<|object_ref_start|>": 151646,
17
+ "<|quad_end|>": 151651,
18
+ "<|quad_start|>": 151650,
19
+ "<|repo_name|>": 151663,
20
+ "<|video_pad|>": 151656,
21
+ "<|vision_end|>": 151653,
22
+ "<|vision_pad|>": 151654,
23
+ "<|vision_start|>": 151652
24
+ }
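
tokenizer/added_tokens.json is a flat token-string-to-id table for the extra control tokens (ids 151643–151664, sitting just above the base BPE vocabulary). A small illustrative sketch, assuming the file is available on disk, that inverts the table for id-to-token lookups:

```python
import json

with open("tokenizer/added_tokens.json") as f:   # path assumed local
    token_to_id = json.load(f)

# Invert: id -> token string, handy when inspecting raw output ids.
id_to_token = {v: k for k, v in token_to_id.items()}

print(id_to_token[151645])   # "<|im_end|>"   (end-of-turn token)
print(id_to_token[151655])   # "<|image_pad|>" (placeholder for vision features)
```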
tokenizer/chat_template.jinja ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {%- if tools %}
2
+ {{- '<|im_start|>system\n' }}
3
+ {%- if messages[0]['role'] == 'system' %}
4
+ {{- messages[0]['content'] }}
5
+ {%- else %}
6
+ {{- 'You are a helpful assistant.' }}
7
+ {%- endif %}
8
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
9
+ {%- for tool in tools %}
10
+ {{- "\n" }}
11
+ {{- tool | tojson }}
12
+ {%- endfor %}
13
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
14
+ {%- else %}
15
+ {%- if messages[0]['role'] == 'system' %}
16
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
17
+ {%- else %}
18
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
19
+ {%- endif %}
20
+ {%- endif %}
21
+ {%- for message in messages %}
22
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
23
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
24
+ {%- elif message.role == "assistant" %}
25
+ {{- '<|im_start|>' + message.role }}
26
+ {%- if message.content %}
27
+ {{- '\n' + message.content }}
28
+ {%- endif %}
29
+ {%- for tool_call in message.tool_calls %}
30
+ {%- if tool_call.function is defined %}
31
+ {%- set tool_call = tool_call.function %}
32
+ {%- endif %}
33
+ {{- '\n<tool_call>\n{"name": "' }}
34
+ {{- tool_call.name }}
35
+ {{- '", "arguments": ' }}
36
+ {{- tool_call.arguments | tojson }}
37
+ {{- '}\n</tool_call>' }}
38
+ {%- endfor %}
39
+ {{- '<|im_end|>\n' }}
40
+ {%- elif message.role == "tool" %}
41
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
42
+ {{- '<|im_start|>user' }}
43
+ {%- endif %}
44
+ {{- '\n<tool_response>\n' }}
45
+ {{- message.content }}
46
+ {{- '\n</tool_response>' }}
47
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
48
+ {{- '<|im_end|>\n' }}
49
+ {%- endif %}
50
+ {%- endif %}
51
+ {%- endfor %}
52
+ {%- if add_generation_prompt %}
53
+ {{- '<|im_start|>assistant\n' }}
54
+ {%- endif %}
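
The Jinja template above is the ChatML-style Qwen conversation format: each turn is wrapped in <|im_start|>role ... <|im_end|>, with optional <tools> and <tool_call> sections for function calling. A hedged sketch of rendering it through transformers, assuming a local download and a transformers release recent enough to pick up a standalone chat_template.jinja:

```python
from transformers import AutoTokenizer  # pip install transformers

# Assumes the repo is downloaded locally; the tokenizer/ subfolder carries
# this chat template alongside vocab.json and merges.txt.
tok = AutoTokenizer.from_pretrained("./tokenizer")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Describe the edit you want."},
]

# Renders <|im_start|>role\n...<|im_end|>\n blocks, plus a trailing
# "<|im_start|>assistant\n" because add_generation_prompt=True.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```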
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>"
16
+ ],
17
+ "eos_token": {
18
+ "content": "<|im_end|>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ },
24
+ "pad_token": {
25
+ "content": "<|endoftext|>",
26
+ "lstrip": false,
27
+ "normalized": false,
28
+ "rstrip": false,
29
+ "single_word": false
30
+ }
31
+ }
tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ }
181
+ },
182
+ "additional_special_tokens": [
183
+ "<|im_start|>",
184
+ "<|im_end|>",
185
+ "<|object_ref_start|>",
186
+ "<|object_ref_end|>",
187
+ "<|box_start|>",
188
+ "<|box_end|>",
189
+ "<|quad_start|>",
190
+ "<|quad_end|>",
191
+ "<|vision_start|>",
192
+ "<|vision_end|>",
193
+ "<|vision_pad|>",
194
+ "<|image_pad|>",
195
+ "<|video_pad|>"
196
+ ],
197
+ "bos_token": null,
198
+ "clean_up_tokenization_spaces": false,
199
+ "eos_token": "<|im_end|>",
200
+ "errors": "replace",
201
+ "extra_special_tokens": {},
202
+ "model_max_length": 131072,
203
+ "pad_token": "<|endoftext|>",
204
+ "split_special_tokens": false,
205
+ "tokenizer_class": "Qwen2Tokenizer",
206
+ "unk_token": null
207
+ }
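
tokenizer_config.json pins the runtime behaviour: Qwen2Tokenizer, a 131072-token model_max_length, <|im_end|> as eos, <|endoftext|> as pad, and split_special_tokens disabled so the control tokens stay atomic when encoding. A quick check under the same local-folder assumption as before:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./tokenizer")   # local folder assumed

print(tok.model_max_length)              # 131072
print(tok.eos_token, tok.eos_token_id)   # <|im_end|> 151645
print(tok.pad_token, tok.pad_token_id)   # <|endoftext|> 151643

# Control tokens encode as single ids rather than sub-word pieces:
ids = tok("<|vision_start|><|image_pad|><|vision_end|>")["input_ids"]
print(ids)   # [151652, 151655, 151653]
```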
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
transformer/config.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "QwenImageTransformer2DModel",
3
+ "_diffusers_version": "0.35.0.dev0",
4
+ "attention_head_dim": 128,
5
+ "axes_dims_rope": [
6
+ 16,
7
+ 56,
8
+ 56
9
+ ],
10
+ "guidance_embeds": false,
11
+ "in_channels": 64,
12
+ "joint_attention_dim": 3584,
13
+ "num_attention_heads": 24,
14
+ "num_layers": 60,
15
+ "out_channels": 16,
16
+ "patch_size": 2
17
+ }
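
transformer/config.json describes the 60-layer QwenImageTransformer2DModel: 24 attention heads of 128 dims, a 3584-wide joint attention dim (which should match the text encoder's hidden size), and 64 input channels for the packed latent. A small sketch of the derived sizes, read straight from the file; the patch-packing interpretation is an assumption based on the patch_size and out_channels values:

```python
import json

with open("transformer/config.json") as f:   # assumes a local download
    cfg = json.load(f)

# Transformer width: attention heads x per-head dim.
inner_dim = cfg["num_attention_heads"] * cfg["attention_head_dim"]   # 24 * 128 = 3072

# in_channels is 4x the 16-channel latent, consistent with packing the latent
# into non-overlapping patch_size x patch_size patches: 16 * 2 * 2 = 64.
packed_channels = cfg["out_channels"] * cfg["patch_size"] ** 2       # 64

print(inner_dim, packed_channels, cfg["joint_attention_dim"])         # 3072 64 3584
```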
transformer/diffusion_pytorch_model-00001-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2003cdd961d3a94897f70a49c4cda975e9f92dd938adfeeba4f335f695570310
3
+ size 4989364312
transformer/diffusion_pytorch_model-00002-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d20abf545941bbffe26e201276226332e6ad6a2d3d7a7a18fb45ec7006fcdc8a
3
+ size 4984214160
transformer/diffusion_pytorch_model-00003-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e4d15c80d388b630a97347694914cbc58ea9dc40e472305add52981ca02e856
3
+ size 4946470000
transformer/diffusion_pytorch_model-00004-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e5baa4c61fbce2011ea179122a8dc4bfaefb5387feeee5eff765fb237977cbd1
3
+ size 4984213736
transformer/diffusion_pytorch_model-00005-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c08a2489c7e11f09c9fd4aae929085a31199699f1fa4a2dd95fc65d1ab5c223c
3
+ size 4946471896
transformer/diffusion_pytorch_model-00006-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:901dfafc0de496f358e27b81af4155b889080aebd4417909303c70a260885fff
3
+ size 4946451560
transformer/diffusion_pytorch_model-00007-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8d9a14e343d841e425bb4f68c4be44b96d94730d328a175f4549bdde9114c96
3
+ size 4908690520
transformer/diffusion_pytorch_model-00008-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f57b82e08f23a33af5a2a6dbf586180b4f5c9e69487d49112544ca5e8fe0d94e
3
+ size 4984232856
transformer/diffusion_pytorch_model-00009-of-00009.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b068de3d8644da55bc75f156ba50cd374f397f2fc49c8e8e00c0392db7cd3a70
3
+ size 1170918840
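
Each of the nine shard files above is stored as a three-line Git LFS pointer (spec version, sha256 oid, byte size); the multi-gigabyte payloads live in LFS storage and are fetched at download time. A hedged sketch for verifying a downloaded shard against its pointer, using only the standard library:

```python
import hashlib

def verify_lfs_object(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded file against its Git LFS pointer (oid sha256 + size)."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    h = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):   # stream in 1 MiB chunks
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size

# Example (paths are assumptions about a local download layout):
# verify_lfs_object("pointer.txt", "transformer/diffusion_pytorch_model-00001-of-00009.safetensors")
```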
transformer/diffusion_pytorch_model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
vae/config.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_class_name": "AutoencoderKLQwenImage",
3
+ "_diffusers_version": "0.35.0.dev0",
4
+ "attn_scales": [],
5
+ "base_dim": 96,
6
+ "dim_mult": [
7
+ 1,
8
+ 2,
9
+ 4,
10
+ 4
11
+ ],
12
+ "dropout": 0.0,
13
+ "latents_mean": [
14
+ -0.7571,
15
+ -0.7089,
16
+ -0.9113,
17
+ 0.1075,
18
+ -0.1745,
19
+ 0.9653,
20
+ -0.1517,
21
+ 1.5508,
22
+ 0.4134,
23
+ -0.0715,
24
+ 0.5517,
25
+ -0.3632,
26
+ -0.1922,
27
+ -0.9497,
28
+ 0.2503,
29
+ -0.2921
30
+ ],
31
+ "latents_std": [
32
+ 2.8184,
33
+ 1.4541,
34
+ 2.3275,
35
+ 2.6558,
36
+ 1.2196,
37
+ 1.7708,
38
+ 2.6052,
39
+ 2.0743,
40
+ 3.2687,
41
+ 2.1526,
42
+ 2.8652,
43
+ 1.5579,
44
+ 1.6382,
45
+ 1.1253,
46
+ 2.8251,
47
+ 1.916
48
+ ],
49
+ "num_res_blocks": 2,
50
+ "temperal_downsample": [
51
+ false,
52
+ true,
53
+ true
54
+ ],
55
+ "z_dim": 16
56
+ }
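
vae/config.json configures AutoencoderKLQwenImage with a 16-channel latent (z_dim) plus per-channel latents_mean and latents_std, which are typically used to standardize latents before the diffusion transformer sees them and to undo that scaling before decoding. A toy sketch of that normalization, assuming torch is installed; the 4D latent shape here is illustrative only:

```python
import json
import torch  # pip install torch

with open("vae/config.json") as f:   # assumes a local download
    cfg = json.load(f)

mean = torch.tensor(cfg["latents_mean"]).view(1, -1, 1, 1)   # (1, 16, 1, 1)
std = torch.tensor(cfg["latents_std"]).view(1, -1, 1, 1)

def normalize_latents(z: torch.Tensor) -> torch.Tensor:
    # Per-channel standardization; the inverse (z * std + mean) would be
    # applied before handing latents back to the VAE decoder.
    return (z - mean) / std

z = torch.randn(1, cfg["z_dim"], 64, 64)   # illustrative shape, not the pipeline's
print(normalize_latents(z).shape)          # torch.Size([1, 16, 64, 64])
```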
vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c8bc8b758c649abef9ea407b95408389a3b2f610d0d10fcb054fe171d0a8344
3
+ size 253806966