From a6ac99b76fa65246fde9bc9acc1a434a15e6f89d Mon Sep 17 00:00:00 2001 From: mehran Date: Wed, 29 Jan 2025 13:51:26 +0330 Subject: [PATCH] Refactor SummaryRetrieval to subclass BitextMining --- mteb/abstasks/AbsTaskSummaryRetrieval.py | 122 ----------- mteb/abstasks/__init__.py | 1 - mteb/benchmarks/benchmarks.py | 1 + .../evaluators/SummaryRetrievalEvaluator.py | 191 ------------------ mteb/evaluation/evaluators/__init__.py | 3 +- .../fas/FaMTEBSummaryRetrieval.py | 21 +- tests/test_benchmark/mock_tasks.py | 47 +---- tests/test_benchmark/task_grid.py | 2 - 8 files changed, 19 insertions(+), 369 deletions(-) delete mode 100644 mteb/abstasks/AbsTaskSummaryRetrieval.py delete mode 100644 mteb/evaluation/evaluators/SummaryRetrievalEvaluator.py diff --git a/mteb/abstasks/AbsTaskSummaryRetrieval.py b/mteb/abstasks/AbsTaskSummaryRetrieval.py deleted file mode 100644 index 17fd772fdb..0000000000 --- a/mteb/abstasks/AbsTaskSummaryRetrieval.py +++ /dev/null @@ -1,122 +0,0 @@ -from __future__ import annotations - -import logging -from typing import Any - -from datasets import Dataset - -from mteb.encoder_interface import Encoder - -from ..evaluation.evaluators import SummaryRetrievalEvaluator -from ..load_results.task_results import ScoresDict -from .AbsTask import AbsTask -from .TaskMetadata import DescriptiveStatistics - -logger = logging.getLogger(__name__) - - -class SummaryRetrievalDescriptiveStatistics(DescriptiveStatistics): - """Descriptive statistics for Summary Retrieval - - Attributes: - num_samples: number of samples in the dataset. - number_of_characters: Total number of symbols in the dataset. - unique_pairs: Number of duplicate pairs - - min_text_length: Minimum length of text - average_text_length: Average length of text - max_text_length: Maximum length of text - unique_text: Number of duplicates in text - - min_summary_length: Minimum length of summary - average_summary_length: Average length of summary - max_summary_length: Maximum length of summary - """ - - num_samples: int - number_of_characters: int - unique_pairs: int - - min_text_length: int - average_text_length: float - max_text_length: int - unique_text: int - - min_summary_length: int - average_summary_length: float - max_summary_length: int - unique_summary: int - - -class AbsTaskSummaryRetrieval(AbsTask): - """Abstract class for SummaryRetrieval tasks - The similarity is computed between pairs and the results are ranked. - - self.load_data() must generate a huggingface dataset with a split matching self.metadata.eval_splits, and assign it to self.dataset. 
It must contain the following columns: - id: str - text: str - summary: str - """ - - def _evaluate_subset( - self, - model: Encoder, - data_split: Dataset, - *, - parallel: bool = False, - encode_kwargs: dict[str, Any] = {}, - **kwargs, - ) -> ScoresDict: - evaluator = SummaryRetrievalEvaluator( - data_split, - task_name=self.metadata.name, - pair_columns=[("text", "summary")], - **kwargs, - ) - metrics = evaluator(model, encode_kwargs=encode_kwargs) - self._add_main_score(metrics) - return metrics - - def _add_main_score(self, scores) -> None: - scores["main_score"] = scores[self.metadata.main_score] - - def _calculate_metrics_from_split( - self, split: str, hf_subset: str | None = None, compute_overall: bool = False - ) -> SummaryRetrievalDescriptiveStatistics: - pairs_cols = [("text", "summary")] - if hf_subset: - sent_1, sent_2 = pairs_cols[0] - text = self.dataset[hf_subset][split][sent_1] - summary = self.dataset[hf_subset][split][sent_2] - elif compute_overall: - text = [] - summary = [] - sent_1, sent_2 = pairs_cols[0] - for hf_subset in self.metadata.eval_langs: - text.extend(self.dataset[hf_subset][split][sent_1]) - summary.extend(self.dataset[hf_subset][split][sent_2]) - else: - sent_1, sent_2 = pairs_cols[0] - text = self.dataset[split][sent_1] - summary = self.dataset[split][sent_2] - s1_len = [len(s1) for s1 in text] - s2_len = [len(s2) for s2 in summary] - total_s1_len = sum(s1_len) - total_s2_len = sum(s2_len) - - unique_pairs = len(set(zip(text, summary))) - unique_text = len(set(text)) - unique_summary = len(set(summary)) - return SummaryRetrievalDescriptiveStatistics( - num_samples=len(text), - number_of_characters=total_s1_len + total_s2_len, - unique_pairs=unique_pairs, - min_text_length=min(s1_len), - average_text_length=sum(s1_len) / len(text), - max_text_length=max(s1_len), - unique_text=unique_text, - min_summary_length=min(s2_len), - average_summary_length=total_s2_len / len(summary), - max_summary_length=max(s2_len), - unique_summary=unique_summary, - ) diff --git a/mteb/abstasks/__init__.py b/mteb/abstasks/__init__.py index 3d246c953d..ef3e8853d7 100644 --- a/mteb/abstasks/__init__.py +++ b/mteb/abstasks/__init__.py @@ -13,5 +13,4 @@ from .AbsTaskSpeedTask import * from .AbsTaskSTS import * from .AbsTaskSummarization import * -from .AbsTaskSummaryRetrieval import * from .MultilingualTask import * diff --git a/mteb/benchmarks/benchmarks.py b/mteb/benchmarks/benchmarks.py index 142ee010e5..48c502b32a 100644 --- a/mteb/benchmarks/benchmarks.py +++ b/mteb/benchmarks/benchmarks.py @@ -1329,6 +1329,7 @@ def load_results( description="Main Persian (Farsi) benchmarks from MTEB", reference=None, citation=None, + contacts=['mehran-sarmadi', 'ERfun', 'morteza20'], ) CHEMTEB = Benchmark( diff --git a/mteb/evaluation/evaluators/SummaryRetrievalEvaluator.py b/mteb/evaluation/evaluators/SummaryRetrievalEvaluator.py deleted file mode 100644 index 4aba700b07..0000000000 --- a/mteb/evaluation/evaluators/SummaryRetrievalEvaluator.py +++ /dev/null @@ -1,191 +0,0 @@ -from __future__ import annotations - -import logging -from typing import Any - -import torch -import tqdm -from datasets import Dataset -from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score - -from mteb.encoder_interface import Encoder - -from .Evaluator import Evaluator -from .utils import cos_sim - -logger = logging.getLogger(__name__) - - -DEFAULT_PAIR = [("text", "summary")] - - -class SummaryRetrievalEvaluator(Evaluator): - def __init__( - self, - sentences: Dataset, - task_name: str 
| None = None, - pair_columns: list[tuple[str, str]] = DEFAULT_PAIR, - **kwargs, - ): - super().__init__(**kwargs) - self.pairs = pair_columns - self.n = len(sentences) - self.sentences = sentences - self.task_name = task_name - - def __call__(self, model: Encoder, *, encode_kwargs: dict[str, Any] = {}): - scores = self.compute_metrics(model, encode_kwargs=encode_kwargs) - return scores - - def compute_metrics(self, model: Encoder, encode_kwargs: dict[str, Any] = {}): - pair_elements = {p for pair in self.pairs for p in pair} - subsets = [ - col for col in self.sentences.features.keys() if col in pair_elements - ] - n_subsets = len(subsets) - - embeddings = {} - for sub in tqdm.tqdm(subsets, desc=f"Encoding {n_subsets}x{self.n} sentences"): - embeddings[sub] = model.encode( - self.sentences[sub], - task_name=self.task_name, - **encode_kwargs, - ) - - scores = {} - for i, (key1, key2) in enumerate( - tqdm.tqdm(self.pairs, desc="Matching sentences") - ): - scores[f"{key1}-{key2}"] = self._compute_metrics( - embeddings[key1], embeddings[key2], model - ) - - # in case of default pair unnest the dict - def_pair_str = "-".join(DEFAULT_PAIR[0]) - if def_pair_str in scores: - scores = scores[def_pair_str] - - return scores - - def _compute_metrics( - self, - embeddings1, - embeddings2, - model: Encoder, - ): - # Find nearest neighbors - logger.info("Finding nearest neighbors...") - nearest_neighbors = self._similarity_search( - embeddings1, embeddings2, model, top_k=1 - ) - - # Compute errors - logger.info("Computing metrics...") - labels = [] - predictions = [] - for i, x in enumerate(nearest_neighbors): - j = x[0]["corpus_id"] - predictions.append(j) - labels.append(i) - - scores = { - "precision": precision_score( - labels, predictions, zero_division=0, average="weighted" - ), - "recall": recall_score( - labels, predictions, zero_division=0, average="weighted" - ), - "f1": f1_score(labels, predictions, zero_division=0, average="weighted"), - "accuracy": accuracy_score(labels, predictions), - } - return scores - - def _similarity_search( - self, - query_embeddings, - corpus_embeddings, - model: Encoder, - query_chunk_size: int = 100, - corpus_chunk_size: int = 500000, - top_k: int = 10, - ): - """This function performs a cosine similarity search between a list of query embeddings and a list of corpus embeddings. - It can be used for Information Retrieval / Semantic Search for corpora up to about 1 Million entries. - - Args: - query_embeddings: A 2 dimensional tensor with the query embeddings. - corpus_embeddings: A 2 dimensional tensor with the corpus embeddings. - model: The model used to encode the queries and corpus. This is used to check if the embeddings are on the same device and to encode the queries and corpus if they are not already tensors. - query_chunk_size: Process 100 queries simultaneously. Increasing that value increases the speed, but requires more memory. - corpus_chunk_size: Scans the corpus 100k entries at a time. Increasing that value increases the speed, but requires more memory. - top_k: Retrieve top k matching entries. - - Returns: - Returns a list with one entry for each query. Each entry is a list of dictionaries with the keys 'corpus_id' and 'score', sorted by decreasing cosine similarity scores. 
- """ - query_embeddings = torch.from_numpy(query_embeddings) - corpus_embeddings = torch.from_numpy(corpus_embeddings) - if len(query_embeddings.shape) == 1: - query_embeddings = query_embeddings.unsqueeze(0) - if len(corpus_embeddings.shape) == 1: - corpus_embeddings = corpus_embeddings.unsqueeze(0) - - # Check that corpus and queries are on the same device - if corpus_embeddings.device != query_embeddings.device: - query_embeddings = query_embeddings.to(corpus_embeddings.device) - - queries_result_list = [[] for _ in range(len(query_embeddings))] - - for query_start_idx in range(0, len(query_embeddings), query_chunk_size): - # Iterate over chunks of the corpus - for corpus_start_idx in range(0, len(corpus_embeddings), corpus_chunk_size): - # Compute cosine similarities - similarity_scores = cos_sim( - query_embeddings[ - query_start_idx : query_start_idx + query_chunk_size - ], - corpus_embeddings[ - corpus_start_idx : corpus_start_idx + corpus_chunk_size - ], - ) - - if hasattr(model, "similarity"): - similarity_scores = model.similarity( - query_embeddings[ - query_start_idx : query_start_idx + query_chunk_size - ], - corpus_embeddings[ - corpus_start_idx : corpus_start_idx + corpus_chunk_size - ], - ) - - # Get top-k scores - cos_scores_top_k_values, cos_scores_top_k_idx = torch.topk( - similarity_scores, - min(top_k, len(similarity_scores[0])), - dim=1, - largest=True, - sorted=False, - ) - cos_scores_top_k_values = cos_scores_top_k_values.cpu().tolist() - cos_scores_top_k_idx = cos_scores_top_k_idx.cpu().tolist() - - for query_itr in range(len(similarity_scores)): - for sub_corpus_id, score in zip( - cos_scores_top_k_idx[query_itr], - cos_scores_top_k_values[query_itr], - ): - corpus_id = corpus_start_idx + sub_corpus_id - query_id = query_start_idx + query_itr - queries_result_list[query_id].append( - {"corpus_id": corpus_id, "score": score} - ) - - # Sort and strip to top_k results - for idx in range(len(queries_result_list)): - queries_result_list[idx] = sorted( - queries_result_list[idx], key=lambda x: x["score"], reverse=True - ) - queries_result_list[idx] = queries_result_list[idx][0:top_k] - - return queries_result_list diff --git a/mteb/evaluation/evaluators/__init__.py b/mteb/evaluation/evaluators/__init__.py index 8bcd896336..ec44dccb9a 100644 --- a/mteb/evaluation/evaluators/__init__.py +++ b/mteb/evaluation/evaluators/__init__.py @@ -7,5 +7,4 @@ from .RerankingEvaluator import * from .RetrievalEvaluator import * from .STSEvaluator import * -from .SummarizationEvaluator import * -from .SummaryRetrievalEvaluator import * +from .SummarizationEvaluator import * \ No newline at end of file diff --git a/mteb/tasks/SummaryRetrieval/fas/FaMTEBSummaryRetrieval.py b/mteb/tasks/SummaryRetrieval/fas/FaMTEBSummaryRetrieval.py index 27f2ed2290..77676f45da 100644 --- a/mteb/tasks/SummaryRetrieval/fas/FaMTEBSummaryRetrieval.py +++ b/mteb/tasks/SummaryRetrieval/fas/FaMTEBSummaryRetrieval.py @@ -1,10 +1,10 @@ from __future__ import annotations -from mteb.abstasks.AbsTaskSummaryRetrieval import AbsTaskSummaryRetrieval +from mteb.abstasks.AbsTaskBitextMining import AbsTaskBitextMining from mteb.abstasks.TaskMetadata import TaskMetadata -class SAMSumFa(AbsTaskSummaryRetrieval): +class SAMSumFa(AbsTaskBitextMining): metadata = TaskMetadata( name="SAMSumFa", description="Translated Version of SAMSum Dataset", @@ -28,9 +28,13 @@ class SAMSumFa(AbsTaskSummaryRetrieval): sample_creation="found", bibtex_citation=""" """, ) + def dataset_transform(self): + self.dataset = 
self.dataset.rename_columns( + {"text": "sentence1", "summary": "sentence2"} + ) -class SynPerChatbotSumSRetrieval(AbsTaskSummaryRetrieval): +class SynPerChatbotSumSRetrieval(AbsTaskBitextMining): metadata = TaskMetadata( name="SynPerChatbotSumSRetrieval", description="Synthetic Persian Chatbot Summary Dataset", @@ -54,9 +58,12 @@ class SynPerChatbotSumSRetrieval(AbsTaskSummaryRetrieval): sample_creation="LM-generated and verified", bibtex_citation=""" """, ) + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"text": "sentence1", "summary": "sentence2"} + ) - -class SynPerChatbotRAGSumSRetrieval(AbsTaskSummaryRetrieval): +class SynPerChatbotRAGSumSRetrieval(AbsTaskBitextMining): metadata = TaskMetadata( name="SynPerChatbotRAGSumSRetrieval", description="Synthetic Persian Chatbot RAG Summary Dataset", @@ -80,3 +87,7 @@ class SynPerChatbotRAGSumSRetrieval(AbsTaskSummaryRetrieval): sample_creation="LM-generated and verified", bibtex_citation=""" """, ) + def dataset_transform(self): + self.dataset = self.dataset.rename_columns( + {"text": "sentence1", "summary": "sentence2"} + ) \ No newline at end of file diff --git a/tests/test_benchmark/mock_tasks.py b/tests/test_benchmark/mock_tasks.py index b182890e94..ca1f87fab3 100644 --- a/tests/test_benchmark/mock_tasks.py +++ b/tests/test_benchmark/mock_tasks.py @@ -18,7 +18,6 @@ from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval from mteb.abstasks.AbsTaskSTS import AbsTaskSTS from mteb.abstasks.AbsTaskSummarization import AbsTaskSummarization -from mteb.abstasks.AbsTaskSummaryRetrieval import AbsTaskSummaryRetrieval from mteb.abstasks.TaskMetadata import TaskMetadata general_args = { @@ -2002,48 +2001,4 @@ def load_data(self, **kwargs): "eng": short_instructions, "fra": short_instructions, } - self.data_loaded = True - - -class MockSummaryRetrievalTask(AbsTaskSummaryRetrieval): - expected_stats = { - "test": { - "num_samples": 2, - "number_of_characters": 88, - "unique_pairs": 2, - "min_text_length": 24, - "average_text_length": 27.0, - "max_text_length": 30, - "unique_text": 2, - "min_summary_length": 13, - "average_summary_length": 17.0, - "max_summary_length": 21, - "unique_summary": 2, - } - } - - metadata = TaskMetadata( - type="SummaryRetrieval", - name="MockSummaryRetrievalTask", - main_score="f1", - **general_args, - ) - - def load_data(self, **kwargs): - # Mock data for summary retrieval - texts = ["This is a test document.", "This is another test document."] - summaries = ["Test summary.", "Another test summary."] - ids = ["doc1", "doc2"] - - self.dataset = DatasetDict( - { - "test": Dataset.from_dict( - { - "id": ids, - "text": texts, - "summary": summaries, - } - ) - } - ) - self.data_loaded = True + self.data_loaded = True \ No newline at end of file diff --git a/tests/test_benchmark/task_grid.py b/tests/test_benchmark/task_grid.py index 5a402e880a..c28ad3ea59 100644 --- a/tests/test_benchmark/task_grid.py +++ b/tests/test_benchmark/task_grid.py @@ -35,7 +35,6 @@ MockRetrievalTask, MockSTSTask, MockSummarizationTask, - MockSummaryRetrievalTask, ) twenty_news = TwentyNewsgroupsClusteringFast() @@ -90,7 +89,6 @@ MockMultilabelClassification(), MockMultilingualMultilabelClassification(), MockSummarizationTask(), - MockSummaryRetrievalTask(), MockMultilingualSummarizationTask(), MockInstructionRetrival(), MockMultilingualInstructionRetrival(),
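
Note for readers tracing this refactor: the deleted SummaryRetrievalEvaluator ranked each text against all summaries with a top-1 cosine-similarity search and scored the matches with accuracy/precision/recall/F1, while the bitext-mining path reused here works on pair columns named "sentence1"/"sentence2". The snippet below is a standalone sketch, not part of this patch, with invented sample data; it only illustrates the column rename that each new dataset_transform performs before AbsTaskBitextMining takes over.

from datasets import Dataset, DatasetDict

# Toy stand-in for the text/summary pairs loaded by SAMSumFa and the
# SynPerChatbot* tasks; the real tasks pull their Hugging Face datasets instead.
dataset = DatasetDict(
    {
        "test": Dataset.from_dict(
            {
                "id": ["doc1", "doc2"],
                "text": ["This is a test document.", "This is another test document."],
                "summary": ["Test summary.", "Another test summary."],
            }
        )
    }
)

# AbsTaskBitextMining evaluates "sentence1"/"sentence2" pairs, so the transform
# only renames the former SummaryRetrieval columns; the data itself is unchanged.
dataset = dataset.rename_columns({"text": "sentence1", "summary": "sentence2"})
print(dataset["test"].column_names)  # ['id', 'sentence1', 'sentence2']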