diff --git a/optimum/pipelines.py b/optimum/pipelines.py
index fa3f569f9d..56cfbaef1c 100644
--- a/optimum/pipelines.py
+++ b/optimum/pipelines.py
@@ -327,15 +327,15 @@ def pipeline(
 
     no_feature_extractor_tasks = set()
     no_tokenizer_tasks = set()
-    for task, values in supported_tasks.items():
+    for _task, values in supported_tasks.items():
         if values["type"] == "text":
-            no_feature_extractor_tasks.add(task)
+            no_feature_extractor_tasks.add(_task)
         elif values["type"] in {"image", "video"}:
-            no_tokenizer_tasks.add(task)
+            no_tokenizer_tasks.add(_task)
         elif values["type"] in {"audio"}:
-            no_tokenizer_tasks.add(task)
+            no_tokenizer_tasks.add(_task)
         elif values["type"] not in ["multimodal", "audio", "video"]:
-            raise ValueError(f"SUPPORTED_TASK {task} contains invalid type {values['type']}")
+            raise ValueError(f"SUPPORTED_TASK {_task} contains invalid type {values['type']}")
 
     # copied from transformers.pipelines.__init__.py l.609
     if targeted_task in no_tokenizer_tasks:
@@ -372,7 +372,7 @@ def pipeline(
         feature_extractor = get_preprocessor(model_id)
 
     return transformers_pipeline(
-        targeted_task,
+        task,
         model=model,
         tokenizer=tokenizer,
         feature_extractor=feature_extractor,
diff --git a/tests/onnxruntime/test_modeling.py b/tests/onnxruntime/test_modeling.py
index 1f43705f12..d80f410162 100644
--- a/tests/onnxruntime/test_modeling.py
+++ b/tests/onnxruntime/test_modeling.py
@@ -2917,10 +2917,7 @@ def test_pipeline_text_generation(self, test_name: str, model_arch: str, use_cac
 
         # Translation
         pipe = pipeline("translation_en_to_de", model=onnx_model, tokenizer=tokenizer)
         text = "This is a test"
-        if model_arch in ["m2m_100", "mbart"]:
-            outputs = pipe(text, src_lang="en", tgt_lang="fr")
-        else:
-            outputs = pipe(text)
+        outputs = pipe(text)
         self.assertEqual(pipe.device, onnx_model.device)
         self.assertIsInstance(outputs[0]["translation_text"], str)