diff --git a/vllm/v1/worker/lora_model_runner_mixin.py b/vllm/v1/worker/lora_model_runner_mixin.py
index a8a19e0e620..3cbab840e96 100644
--- a/vllm/v1/worker/lora_model_runner_mixin.py
+++ b/vllm/v1/worker/lora_model_runner_mixin.py
@@ -28,20 +28,16 @@ def load_lora_model(self, model: nn.Module, model_config: ModelConfig,
                         scheduler_config: SchedulerConfig,
                         lora_config: LoRAConfig, device: str) -> nn.Module:
 
-        assert supports_lora(
-            model), f"{model.__class__.__name__} does not support LoRA yet."
+        if not supports_lora(model):
+            raise ValueError(
+                f"{model.__class__.__name__} does not support LoRA yet.")
 
         if supports_multimodal(model):
             logger.warning("Regarding multimodal models, vLLM currently "
                            "only supports adding LoRA to language model.")
 
-        # It's necessary to distinguish between the max_position_embeddings
-        # of VLMs and LLMs.
-        if hasattr(model.config, "max_position_embeddings"):
-            max_pos_embeddings = model.config.max_position_embeddings
-        else:
-            max_pos_embeddings = (
-                model.config.text_config.max_position_embeddings)
+        # Use get_text_config() in case of multimodal models
+        text_config = model_config.hf_config.get_text_config()
 
         # Add LoRA Manager to the Model Runner
         self.lora_manager = LRUCacheWorkerLoRAManager(
@@ -52,7 +48,7 @@ def load_lora_model(self, model: nn.Module, model_config: ModelConfig,
             device,
             model.embedding_modules,
             model.embedding_padding_modules,
-            max_position_embeddings=max_pos_embeddings,
+            max_position_embeddings=text_config.max_position_embeddings,
         )
 
         return self.lora_manager.create_lora_manager(model)
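
For context, a minimal sketch (not part of the diff) of why the hasattr() branching can be dropped: in recent transformers releases, PretrainedConfig.get_text_config() returns the nested language-model config for multimodal configs and the config itself for plain text configs, so max_position_embeddings can be read the same way in both cases. LlamaConfig and LlavaConfig below are only illustrative stand-ins, and the snippet assumes a transformers version that provides get_text_config().

```python
from transformers import LlamaConfig, LlavaConfig

text_only = LlamaConfig()    # plain decoder-only LLM config
multimodal = LlavaConfig()   # multimodal config with a nested text_config

# Text-only model: get_text_config() resolves to the config itself.
print(text_only.get_text_config().max_position_embeddings)

# Multimodal model: get_text_config() resolves to the nested language-model
# config, which is where max_position_embeddings actually lives.
print(multimodal.get_text_config().max_position_embeddings)
```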