I am unable to run this model on Kaggle — loading the checkpoint fails with the ImportError shown below.
Installation cell
!pip install "transformers==4.51.0" accelerate "torch>=2.3.0,<=2.8.0" "torchaudio<=2.8.0" "minicpmo-utils>=1.0.2" autoawq
Code cell
# Load the AWQ-quantized MiniCPM-o 4.5 checkpoint and ask one question about an image.
import torch
from PIL import Image
from transformers import AutoModel

# Vision-only setup: the audio and TTS submodules are left uninitialized.
model = AutoModel.from_pretrained(
    "openbmb/MiniCPM-o-4_5-awq",
    trust_remote_code=True,
    attn_implementation="sdpa",  # or "flash_attention_2"
    torch_dtype=torch.bfloat16,
    init_vision=True,
    init_audio=False,
    init_tts=False,
)
model.eval().cuda()

# Single-turn multimodal prompt: the PIL image followed by the text question.
img = Image.open("/kaggle/input/eng-plate-obb-1/dataset/test/images/20251230_JH10U6363.jpg").convert("RGB")
prompt = "What is in the image?"
conversation = [{"role": "user", "content": [img, prompt]}]

use_thinking = False  # If enable_thinking=True, the thinking mode is enabled.
do_stream = False     # If stream=True, return string generator

answer = model.chat(
    msgs=conversation,
    use_tts_template=False,
    enable_thinking=use_thinking,
    stream=do_stream,
)
print(answer)
Error
ImportError Traceback (most recent call last)
/tmp/ipykernel_55/2851898344.py in <cell line: 0>()
3 from transformers import AutoModel
4
----> 5 model = AutoModel.from_pretrained(
6 "openbmb/MiniCPM-o-4_5-awq",
7 trust_remote_code=True,
/usr/local/lib/python3.12/dist-packages/transformers/models/auto/auto_factory.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
    547 has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
548 has_local_code = type(config) in cls._model_mapping.keys()
--> 549 trust_remote_code = resolve_trust_remote_code(
550 trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
551 )
/usr/local/lib/python3.12/dist-packages/transformers/models/auto/configuration_auto.py in from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
/usr/local/lib/python3.12/dist-packages/transformers/dynamic_module_utils.py in get_class_from_dynamic_module(class_reference, pretrained_model_name_or_path, cache_dir, force_download, resume_download, proxies, token, revision, local_files_only, repo_type, code_revision, **kwargs)
614
615 full_name = (slow_tokenizer_class, fast_tokenizer_class)
--> 616
617 if isinstance(_config, dict):
618 auto_map = _config.get("auto_map", {})
/usr/local/lib/python3.12/dist-packages/transformers/dynamic_module_utils.py in get_class_in_module(class_name, module_path, force_reload)
309 Will be removed in v5 of Transformers.
310 proxies (Dict[str, str], optional):
--> 311 A dictionary of proxy servers to use by protocol or endpoint, e.g., {'http': 'foo.bar:3128',
    312 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request.
    313 token (str or bool, optional):
/usr/lib/python3.12/importlib/_bootstrap_external.py in exec_module(self, module)
/usr/lib/python3.12/importlib/_bootstrap.py in _call_with_frames_removed(f, *args, **kwds)
~/.cache/huggingface/modules/transformers_modules/openbmb/MiniCPM_hyphen_o_hyphen_4_5_hyphen_awq/cbbb3c88e689e212c4380f790715bbb1a3c68642/configuration_minicpmo.py in
23 from transformers.utils import logging
24
---> 25 from .modeling_navit_siglip import SiglipVisionConfig
26
    27 logger = logging.get_logger(__name__)
~/.cache/huggingface/modules/transformers_modules/openbmb/MiniCPM_hyphen_o_hyphen_4_5_hyphen_awq/cbbb3c88e689e212c4380f790715bbb1a3c68642/modeling_navit_siglip.py in
36 from transformers.modeling_outputs import BaseModelOutput
37 from transformers.modeling_outputs import BaseModelOutputWithPooling
---> 38 from transformers.modeling_utils import PreTrainedModel
39 from transformers.utils import add_start_docstrings
40 from transformers.utils import add_start_docstrings_to_model_forward
/usr/local/lib/python3.12/dist-packages/transformers/modeling_utils.py in
76 prune_linear_layer,
77 )
---> 78 from .quantizers import AutoHfQuantizer, HfQuantizer
79 from .quantizers.quantizers_utils import get_module_from_name
80 from .safetensors_conversion import auto_conversion
/usr/local/lib/python3.12/dist-packages/transformers/quantizers/__init__.py in
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
---> 14 from .auto import AutoHfQuantizer, AutoQuantizationConfig, register_quantization_config, register_quantizer
15 from .base import HfQuantizer
16 from .quantizers_utils import get_module_from_name
/usr/local/lib/python3.12/dist-packages/transformers/quantizers/auto.py in
52 from .quantizer_hqq import HqqHfQuantizer
53 from .quantizer_quanto import QuantoHfQuantizer
---> 54 from .quantizer_quark import QuarkHfQuantizer
55 from .quantizer_spqr import SpQRHfQuantizer
56 from .quantizer_torchao import TorchAoHfQuantizer
/usr/local/lib/python3.12/dist-packages/transformers/quantizers/quantizer_quark.py in
16 from typing import TYPE_CHECKING, Any, Dict
17
---> 18 from ..file_utils import is_torch_available
19 from .base import HfQuantizer
20
/usr/local/lib/python3.12/dist-packages/transformers/file_utils.py in
24
25 # Backward compatibility imports, to make sure all those objects can be found in file_utils
---> 26 from .utils import (
27 CLOUDFRONT_DISTRIB_PREFIX,
28 CONFIG_NAME,
ImportError: cannot import name 'cached_property' from 'transformers.utils' (/usr/local/lib/python3.12/dist-packages/transformers/utils/__init__.py)