From 16f560a1670c9bf1216dfeb1f2947699395c9bb0 Mon Sep 17 00:00:00 2001
From: Jintao Huang
Date: Tue, 14 Jan 2025 23:39:03 +0800
Subject: [PATCH] fix inspect init

---
 swift/llm/infer/deploy.py                       | 2 +-
 swift/llm/infer/infer_engine/lmdeploy_engine.py | 2 +-
 swift/llm/infer/infer_engine/vllm_engine.py     | 4 ++--
 swift/trainers/trainer_factory.py               | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/swift/llm/infer/deploy.py b/swift/llm/infer/deploy.py
index 87bf43c08..86b47304c 100644
--- a/swift/llm/infer/deploy.py
+++ b/swift/llm/infer/deploy.py
@@ -203,7 +203,7 @@ def run_deploy(args: DeployArguments, return_url: bool = False):
         deploy_args = args
     else:
         args_dict = asdict(args)
-        parameters = inspect.signature(DeployArguments.__init__).parameters
+        parameters = inspect.signature(DeployArguments).parameters
         for k in list(args_dict.keys()):
             if k not in parameters or args_dict[k] is None:
                 args_dict.pop(k)
diff --git a/swift/llm/infer/infer_engine/lmdeploy_engine.py b/swift/llm/infer/infer_engine/lmdeploy_engine.py
index a665d4ea4..1ac42ce91 100644
--- a/swift/llm/infer/infer_engine/lmdeploy_engine.py
+++ b/swift/llm/infer/infer_engine/lmdeploy_engine.py
@@ -130,7 +130,7 @@ def _load_generation_config(self):
         max_new_tokens = kwargs.get('max_new_tokens')
         if max_new_tokens is None:
             kwargs.pop('max_new_tokens', None)
-        parameters = inspect.signature(LmdeployGenerationConfig.__init__).parameters
+        parameters = inspect.signature(LmdeployGenerationConfig).parameters
         for k, v in kwargs.copy().items():
             if k not in parameters or v is None:
                 kwargs.pop(k)
diff --git a/swift/llm/infer/infer_engine/vllm_engine.py b/swift/llm/infer/infer_engine/vllm_engine.py
index 0a5984d4c..c23c116df 100644
--- a/swift/llm/infer/infer_engine/vllm_engine.py
+++ b/swift/llm/infer/infer_engine/vllm_engine.py
@@ -110,7 +110,7 @@ def _prepare_engine_kwargs(self,
         disable_log_stats = engine_kwargs.pop('disable_log_stats', True)
         engine_kwargs['disable_log_requests'] = True

-        parameters = inspect.signature(AsyncEngineArgs.__init__).parameters
+        parameters = inspect.signature(AsyncEngineArgs).parameters
         if 'enable_lora' in parameters and enable_lora:
             engine_kwargs['enable_lora'] = enable_lora
             engine_kwargs['max_loras'] = max_loras
@@ -167,7 +167,7 @@ def _load_generation_config(self) -> None:
         max_new_tokens = kwargs.get('max_new_tokens')
         if max_new_tokens is not None:
             kwargs['max_tokens'] = max_new_tokens
-        parameters = inspect.signature(SamplingParams.__init__).parameters
+        parameters = inspect.signature(SamplingParams).parameters
         for k, v in kwargs.copy().items():
             if k not in parameters or v is None:
                 kwargs.pop(k)
diff --git a/swift/trainers/trainer_factory.py b/swift/trainers/trainer_factory.py
index 19c93a042..4cc1edb46 100644
--- a/swift/trainers/trainer_factory.py
+++ b/swift/trainers/trainer_factory.py
@@ -50,7 +50,7 @@ def get_trainer_cls(cls, args):
     def get_training_args(cls, args):
         training_args_cls = cls.get_cls(args, cls.TRAINING_ARGS_MAPPING)
         args_dict = asdict(args)
-        parameters = inspect.signature(training_args_cls.__init__).parameters
+        parameters = inspect.signature(training_args_cls).parameters
         for k in list(args_dict.keys()):
             if k not in parameters: