Tags: Image-Text-to-Text · Transformers · Safetensors · multilingual · minicpmv · feature-extraction · minicpm-v · vision · ocr · multi-image · video · custom_code · conversational · 4-bit precision · bitsandbytes
Instructions for using openbmb/MiniCPM-V-2_6-int4 with libraries, inference providers, notebooks, and local apps. Follow the links below to get started.
- Libraries
- Transformers
How to use openbmb/MiniCPM-V-2_6-int4 with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="openbmb/MiniCPM-V-2_6-int4", trust_remote_code=True)
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
pipe(text=messages)
```

```python
# Load model directly
from transformers import AutoModel

model = AutoModel.from_pretrained("openbmb/MiniCPM-V-2_6-int4", trust_remote_code=True, dtype="auto")
```
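Because the checkpoint ships custom modeling code, you can also call its bundled `chat` helper directly. A minimal sketch, assuming the chat interface documented on the upstream MiniCPM-V 2.6 model card; the image path is a placeholder:

```python
from PIL import Image
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained("openbmb/MiniCPM-V-2_6-int4", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("openbmb/MiniCPM-V-2_6-int4", trust_remote_code=True)
model.eval()

# Images are passed inline in the message content.
image = Image.open("example.jpg").convert("RGB")  # placeholder path
msgs = [{"role": "user", "content": [image, "What is in the image?"]}]

answer = model.chat(image=None, msgs=msgs, tokenizer=tokenizer)
print(answer)
```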
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use openbmb/MiniCPM-V-2_6-int4 with vLLM:
Install from pip and serve the model:
```shell
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "openbmb/MiniCPM-V-2_6-int4"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "openbmb/MiniCPM-V-2_6-int4",
    "messages": [
      {
        "role": "user",
        "content": [
          {
            "type": "text",
            "text": "Describe this image in one sentence."
          },
          {
            "type": "image_url",
            "image_url": {
              "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
            }
          }
        ]
      }
    ]
  }'
```
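The same request can be sent from Python; a minimal sketch using the official `openai` client against the server started above (the API key is a dummy value, since vLLM does not require one by default):

```python
from openai import OpenAI

# Point the OpenAI-compatible client at the local vLLM server.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="openbmb/MiniCPM-V-2_6-int4",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                {
                    "type": "image_url",
                    "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"},
                },
            ],
        }
    ],
)
print(response.choices[0].message.content)
```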
Use Docker

```shell
docker model run hf.co/openbmb/MiniCPM-V-2_6-int4
```
- SGLang
How to use openbmb/MiniCPM-V-2_6-int4 with SGLang:
Install from pip and serve the model:
```shell
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "openbmb/MiniCPM-V-2_6-int4" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "openbmb/MiniCPM-V-2_6-int4",
    "messages": [
      {
        "role": "user",
        "content": [
          {
            "type": "text",
            "text": "Describe this image in one sentence."
          },
          {
            "type": "image_url",
            "image_url": {
              "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
            }
          }
        ]
      }
    ]
  }'
```

Use Docker images
```shell
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
  --model-path "openbmb/MiniCPM-V-2_6-int4" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "openbmb/MiniCPM-V-2_6-int4",
    "messages": [
      {
        "role": "user",
        "content": [
          {
            "type": "text",
            "text": "Describe this image in one sentence."
          },
          {
            "type": "image_url",
            "image_url": {
              "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
            }
          }
        ]
      }
    ]
  }'
```

- Docker Model Runner
How to use openbmb/MiniCPM-V-2_6-int4 with Docker Model Runner:
```shell
docker model run hf.co/openbmb/MiniCPM-V-2_6-int4
```
Add vision_batch_size to avoid CUDA OOM

Files changed:
- configuration_minicpm.py (+3, -1)
- modeling_minicpmv.py (+27, -25)
configuration_minicpm.py

```diff
@@ -69,6 +69,7 @@ class MiniCPMVConfig(Qwen2Config):
         slice_config=None,
         vision_config=None,
         use_image_id=True,
+        vision_batch_size=16,
         **kwargs,
     ):
         self.use_cache = use_cache
@@ -77,6 +78,7 @@ class MiniCPMVConfig(Qwen2Config):
         self.drop_vision_last_layer = drop_vision_last_layer
         self.batch_vision_input = batch_vision_input
         self.use_image_id = use_image_id
+        self.vision_batch_size = vision_batch_size
 
         if slice_config is None:
             self.slice_config = MiniCPMVSliceConfig(max_slice_nums=1)
@@ -95,4 +97,4 @@ class MiniCPMVConfig(Qwen2Config):
 
         self.patch_size = self.vision_config.patch_size
 
-        super().__init__(**kwargs)
+        super().__init__(**kwargs)
```
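If the default of 16 still runs out of memory, the new field can be lowered when loading the model. A minimal sketch, assuming the standard Transformers pattern of editing the config before `from_pretrained`; the value 8 is only an illustration, not from the commit:

```python
from transformers import AutoConfig, AutoModel

# Load the remote config, lower the vision encoder batch size, then load the
# model with the modified config. Smaller values trade throughput for memory.
config = AutoConfig.from_pretrained("openbmb/MiniCPM-V-2_6-int4", trust_remote_code=True)
config.vision_batch_size = 8  # illustrative; the commit's default is 16

model = AutoModel.from_pretrained(
    "openbmb/MiniCPM-V-2_6-int4",
    config=config,
    trust_remote_code=True,
)
```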
modeling_minicpmv.py

```diff
@@ -92,31 +92,30 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
             tgt_sizes = [tgt_size for tgt_size in tgt_sizes if isinstance(tgt_size, torch.Tensor)]
             tgt_sizes = torch.vstack(tgt_sizes).type(torch.int32)
 
-            if self.config.batch_vision_input:
-                max_patches = torch.max(tgt_sizes[:, 0] * tgt_sizes[:, 1])
-
-                all_pixel_values = torch.nn.utils.rnn.pad_sequence(all_pixel_values, batch_first=True,
-                                                                   padding_value=0.0)
-                B, L, _ = all_pixel_values.shape
-                all_pixel_values = all_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)
-
-                patch_attn_mask = torch.zeros((B, 1, max_patches), dtype=torch.bool, device=device)
-                for i in range(B):
-                    patch_attn_mask[i, 0, :tgt_sizes[i][0] * tgt_sizes[i][1]] = True
-
-                vision_embedding = self.vpm(all_pixel_values.type(dtype), patch_attention_mask=patch_attn_mask, tgt_sizes=tgt_sizes).last_hidden_state
-                vision_embedding = self.resampler(vision_embedding, tgt_sizes)
+            max_patches = torch.max(tgt_sizes[:, 0] * tgt_sizes[:, 1])
+
+            all_pixel_values = torch.nn.utils.rnn.pad_sequence(all_pixel_values, batch_first=True,
+                                                               padding_value=0.0)
+            B, L, _ = all_pixel_values.shape
+            all_pixel_values = all_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)
+
+            patch_attn_mask = torch.zeros((B, 1, max_patches), dtype=torch.bool, device=device)
+            for i in range(B):
+                patch_attn_mask[i, 0, :tgt_sizes[i][0] * tgt_sizes[i][1]] = True
+
+            vision_batch_size = self.config.vision_batch_size
+            all_pixel_values = all_pixel_values.type(dtype)
+            if B > vision_batch_size:
+                hs = []
+                for i in range(0, B, vision_batch_size):
+                    start_idx = i
+                    end_idx = i + vision_batch_size
+                    tmp_hs = self.vpm(all_pixel_values[start_idx:end_idx], patch_attention_mask=patch_attn_mask[start_idx:end_idx], tgt_sizes=tgt_sizes[start_idx:end_idx]).last_hidden_state
+                    hs.append(tmp_hs)
+                vision_embedding = torch.cat(hs, dim=0)
             else:
-                # get vision_embedding foreach
-                vision_embedding = []
-                for single_tgt_size, single_pixel_values in zip(tgt_sizes, all_pixel_values):
-                    single_pixel_values = single_pixel_values.unsqueeze(0)
-                    B, L, _ = single_pixel_values.shape
-                    single_pixel_values = single_pixel_values.permute(0, 2, 1).reshape(B, 3, -1, L)
-                    single_vision_embedding = self.vpm(single_pixel_values.type(dtype), tgt_sizes=single_tgt_size.unsqueeze(0)).last_hidden_state
-                    single_vision_embedding = self.resampler(single_vision_embedding, single_tgt_size.unsqueeze(0))
-                    vision_embedding.append(single_vision_embedding)
-                vision_embedding = torch.vstack(vision_embedding)
+                vision_embedding = self.vpm(all_pixel_values, patch_attention_mask=patch_attn_mask, tgt_sizes=tgt_sizes).last_hidden_state
+            vision_embedding = self.resampler(vision_embedding, tgt_sizes)
 
             start = 0
             for pixel_values in pixel_values_list:
@@ -273,7 +272,7 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
         tokenizer,
         processor=None,
         vision_hidden_states=None,
-        max_new_tokens=
+        max_new_tokens=2048,
         min_new_tokens=0,
         sampling=True,
         max_inp_length=8192,
@@ -292,6 +291,9 @@ class MiniCPMV(MiniCPMVPreTrainedModel):
 
         if batched is False:
             images_list, msgs_list = [images_list], [msgs_list]
+        else:
+            assert images_list is None, "Please integrate image to msgs when using batch inference."
+            images_list = [None] * len(msgs_list)
         assert len(images_list) == len(msgs_list), "The batch dim of images_list and msgs_list should be the same."
 
         if processor is None:
```
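The added `else` branch fixes the batched-inference contract: images go inside each message's content, with no separate image argument. A hedged sketch of a batched call under that contract, assuming `model` and `tokenizer` are loaded as in the Transformers example above and that `chat` treats a list of message lists as a batch (file paths are placeholders):

```python
from PIL import Image

# Per the new assertion, integrate images into msgs and pass image=None
# when running batch inference.
img_a = Image.open("photo_a.jpg").convert("RGB")  # placeholder path
img_b = Image.open("photo_b.jpg").convert("RGB")  # placeholder path

msgs_batch = [
    [{"role": "user", "content": [img_a, "What is in this image?"]}],
    [{"role": "user", "content": [img_b, "Describe the scene in one sentence."]}],
]

answers = model.chat(image=None, msgs=msgs_batch, tokenizer=tokenizer)
for ans in answers:
    print(ans)
```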
|