From f98a8f5e21268339e62c3ffd9ae244c887ecdc10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Albert=20=C3=96rwall?= Date: Sun, 17 Nov 2024 19:27:03 +0100 Subject: [PATCH 1/2] Rework --- README.md | 142 +- moatless/__init__.py | 5 +- moatless/actions/__init__.py | 9 + moatless/actions/action.py | 143 + moatless/actions/apply_change_and_test.py | 124 + moatless/actions/code_change.py | 1000 + moatless/actions/code_modification_mixin.py | 110 + moatless/actions/create_file.py | 161 + moatless/actions/edit.py | 514 + moatless/actions/find_class.py | 95 + moatless/actions/find_code_snippet.py | 98 + moatless/actions/find_function.py | 133 + moatless/actions/finish.py | 77 + moatless/actions/insert_line.py | 194 + moatless/actions/model.py | 172 + moatless/actions/reject.py | 29 + moatless/actions/run_tests.py | 222 + moatless/actions/search_base.py | 311 + moatless/actions/semantic_search.py | 104 + moatless/actions/string_replace.py | 370 + moatless/actions/view_code.py | 301 + moatless/agent/__init__.py | 2 + moatless/agent/agent.py | 277 + moatless/agent/code_agent.py | 286 + moatless/agent/code_prompts.py | 477 + moatless/benchmark/claude_evaluation.py | 360 - moatless/benchmark/create_dataset.py | 155 - moatless/benchmark/evaluation.py | 517 - moatless/benchmark/loop_evaluation.py | 467 + moatless/benchmark/report_v1.py | 418 - moatless/benchmark/report_v2.py | 449 - moatless/benchmark/swebench/utils.py | 307 +- .../swebench_lite_all_evaluations.json | 64393 ++++++++++- .../swebench_verified_all_evaluations.json | 92635 ++++++++++++++++ moatless/benchmark/utils.py | 158 +- moatless/codeblocks/codeblocks.py | 339 +- moatless/codeblocks/module.py | 38 +- moatless/codeblocks/parser/create.py | 2 +- moatless/codeblocks/parser/parser.py | 126 +- moatless/codeblocks/parser/python.py | 2 +- moatless/completion/__init__.py | 1 + moatless/completion/completion.py | 1111 + moatless/completion/log_handler.py | 78 + moatless/completion/model.py | 312 + moatless/edit/__init__.py | 3 - moatless/edit/clarify.py | 330 - moatless/edit/edit.py | 339 - moatless/edit/plan.py | 334 - moatless/edit/plan_lines.py | 283 - moatless/edit/prompt.py | 51 - moatless/edit/review.py | 423 - moatless/exceptions.py | 50 + moatless/file_context.py | 1149 +- moatless/find/__init__.py | 3 - moatless/find/decide.py | 166 - moatless/find/find_code_snippet.py | 36 - moatless/find/identify.py | 180 - moatless/find/search.py | 485 - moatless/index/code_index.py | 557 +- moatless/index/code_node.py | 10 +- moatless/index/embed_model.py | 6 +- moatless/index/epic_split.py | 49 +- moatless/index/retry_voyage_embedding.py | 40 + moatless/index/simple_faiss.py | 5 +- moatless/index/types.py | 6 + moatless/loop.py | 924 +- moatless/node.py | 780 + moatless/repository/__init__.py | 11 +- moatless/repository/file.py | 488 +- moatless/repository/git.py | 176 +- moatless/repository/repository.py | 73 + moatless/{verify => runtime}/__init__.py | 0 moatless/runtime/runtime.py | 34 + moatless/runtime/testbed.py | 449 + moatless/schema.py | 33 + moatless/settings.py | 55 - moatless/state.py | 297 - moatless/trajectory.py | 261 - moatless/transition_rules.py | 168 - moatless/transitions.py | 257 - moatless/types.py | 125 - moatless/utils/misc.py | 26 + moatless/utils/repo.py | 99 +- moatless/verify/lint.py | 47 - moatless/verify/maven.py | 105 - moatless/verify/verify.py | 10 - moatless/workspace.py | 171 - poetry.lock | 3799 +- pyproject.toml | 51 +- .../classify.py => tests/actions/__init__.py | 0 tests/actions/test_action.py | 64 + 
tests/actions/test_create_file.py | 102 + tests/actions/test_edit.py | 126 + tests/actions/test_find_function.py | 47 + tests/actions/test_string_replace.py | 302 + tests/agent/test_code_agent.py | 159 + tests/benchmark/test_evaluation.py | 82 - tests/benchmark/test_report_v2.py | 39 - tests/codeblocks/data/makemigrations.py_ | 322 + tests/codeblocks/data/test_ridge.py_ | 862 + tests/codeblocks/test_codeblocks.py | 11 - tests/codeblocks/test_python_parser.py | 47 +- tests/conftest.py | 26 +- tests/edit/test_clarify.py | 135 - tests/edit/test_edit.py | 118 - tests/edit/test_plan.py | 115 - tests/find/test_decide.py | 115 - tests/find/test_identify.py | 90 - tests/find/test_search.py | 68 - tests/index/test_code_index.py | 201 + tests/index/test_epic_split.py | 99 + tests/integration_test.py | 140 - tests/repository/test_file.py | 172 + tests/repository/test_git.py | 71 + tests/test_file_context.py | 422 +- tests/test_json_extraction.py | 273 + tests/test_loop.py | 137 - tests/test_node.py | 180 + tests/test_repository.py | 50 + tests/test_state.py | 163 - tests/test_trajectory.py | 52 - tests/test_transition_rules.py | 212 - tests/trajectories/django__django_16379.json | 595 - tests/utils.py | 12 - 124 files changed, 167821 insertions(+), 17956 deletions(-) create mode 100644 moatless/actions/__init__.py create mode 100644 moatless/actions/action.py create mode 100644 moatless/actions/apply_change_and_test.py create mode 100644 moatless/actions/code_change.py create mode 100644 moatless/actions/code_modification_mixin.py create mode 100644 moatless/actions/create_file.py create mode 100644 moatless/actions/edit.py create mode 100644 moatless/actions/find_class.py create mode 100644 moatless/actions/find_code_snippet.py create mode 100644 moatless/actions/find_function.py create mode 100644 moatless/actions/finish.py create mode 100644 moatless/actions/insert_line.py create mode 100644 moatless/actions/model.py create mode 100644 moatless/actions/reject.py create mode 100644 moatless/actions/run_tests.py create mode 100644 moatless/actions/search_base.py create mode 100644 moatless/actions/semantic_search.py create mode 100644 moatless/actions/string_replace.py create mode 100644 moatless/actions/view_code.py create mode 100644 moatless/agent/__init__.py create mode 100644 moatless/agent/agent.py create mode 100644 moatless/agent/code_agent.py create mode 100644 moatless/agent/code_prompts.py delete mode 100644 moatless/benchmark/claude_evaluation.py delete mode 100644 moatless/benchmark/create_dataset.py delete mode 100644 moatless/benchmark/evaluation.py create mode 100644 moatless/benchmark/loop_evaluation.py delete mode 100644 moatless/benchmark/report_v1.py delete mode 100644 moatless/benchmark/report_v2.py create mode 100644 moatless/benchmark/swebench_verified_all_evaluations.json create mode 100644 moatless/completion/__init__.py create mode 100644 moatless/completion/completion.py create mode 100644 moatless/completion/log_handler.py create mode 100644 moatless/completion/model.py delete mode 100644 moatless/edit/__init__.py delete mode 100644 moatless/edit/clarify.py delete mode 100644 moatless/edit/edit.py delete mode 100644 moatless/edit/plan.py delete mode 100644 moatless/edit/plan_lines.py delete mode 100644 moatless/edit/prompt.py delete mode 100644 moatless/edit/review.py create mode 100644 moatless/exceptions.py delete mode 100644 moatless/find/__init__.py delete mode 100644 moatless/find/decide.py delete mode 100644 moatless/find/find_code_snippet.py delete mode 100644 
moatless/find/identify.py delete mode 100644 moatless/find/search.py create mode 100644 moatless/index/retry_voyage_embedding.py create mode 100644 moatless/node.py create mode 100644 moatless/repository/repository.py rename moatless/{verify => runtime}/__init__.py (100%) create mode 100644 moatless/runtime/runtime.py create mode 100644 moatless/runtime/testbed.py create mode 100644 moatless/schema.py delete mode 100644 moatless/settings.py delete mode 100644 moatless/state.py delete mode 100644 moatless/trajectory.py delete mode 100644 moatless/transition_rules.py delete mode 100644 moatless/transitions.py delete mode 100644 moatless/types.py create mode 100644 moatless/utils/misc.py delete mode 100644 moatless/verify/lint.py delete mode 100644 moatless/verify/maven.py delete mode 100644 moatless/verify/verify.py delete mode 100644 moatless/workspace.py rename moatless/benchmark/plan/classify.py => tests/actions/__init__.py (100%) create mode 100644 tests/actions/test_action.py create mode 100644 tests/actions/test_create_file.py create mode 100644 tests/actions/test_edit.py create mode 100644 tests/actions/test_find_function.py create mode 100644 tests/actions/test_string_replace.py create mode 100644 tests/agent/test_code_agent.py delete mode 100644 tests/benchmark/test_evaluation.py delete mode 100644 tests/benchmark/test_report_v2.py create mode 100644 tests/codeblocks/data/makemigrations.py_ create mode 100644 tests/codeblocks/data/test_ridge.py_ delete mode 100644 tests/codeblocks/test_codeblocks.py delete mode 100644 tests/edit/test_clarify.py delete mode 100644 tests/edit/test_edit.py delete mode 100644 tests/edit/test_plan.py delete mode 100644 tests/find/test_decide.py delete mode 100644 tests/find/test_identify.py delete mode 100644 tests/find/test_search.py create mode 100644 tests/index/test_code_index.py create mode 100644 tests/index/test_epic_split.py delete mode 100644 tests/integration_test.py create mode 100644 tests/repository/test_file.py create mode 100644 tests/repository/test_git.py create mode 100644 tests/test_json_extraction.py delete mode 100644 tests/test_loop.py create mode 100644 tests/test_node.py create mode 100644 tests/test_repository.py delete mode 100644 tests/test_state.py delete mode 100644 tests/test_trajectory.py delete mode 100644 tests/test_transition_rules.py delete mode 100644 tests/trajectories/django__django_16379.json delete mode 100644 tests/utils.py diff --git a/README.md b/README.md index 879dc85d..c03e89f2 100644 --- a/README.md +++ b/README.md @@ -1,49 +1,127 @@ # Moatless Tools Moatless Tools is a hobby project where I experiment with some ideas I have about how LLMs can be used to edit code in large existing codebases. I believe that rather than relying on an agent to reason its way to a solution, it is crucial to build good tools to insert the right context into the prompt and handle the response. +_Right now I'm focusing on moatless-tree-search, an extended version of moatless-tools. The code in moatless-tools is now a simplified version of that code base_. + ## SWE-Bench I use the [SWE-bench benchmark](https://www.swebench.com/) as a way to verify my ideas and am currently sharing the sixth place on the SWE-Bench Lite Leaderboard. -### GPT-4o -Moatless Tools 0.0.1 has a solve rate of 24%, with each benchmark instance costing an average of $0.13 to solve with GPT-4o. Running the SWE Bench Lite dataset with 300 instances costs approx 40 dollars. 
+### Version 0.0.3: Claude 3.5 Sonnet v20241022 -[Try it out in Google Colab](https://colab.research.google.com/drive/15RpSjdprf9lcaP0oqKsuYfZl1c3kVB_t?usp=sharing) -### Claude 3.5 Sonnet +### Version 0.0.2: Claude 3.5 Sonnet With version 0.0.2 I get 26.7% solve rate with Claude 3.5 Sonnet, with a bit higher cost of $0.17 per instance. [Try the Claude 3.5 evaluation set up on Google Colab](https://colab.research.google.com/drive/1pKecc3pumsrOGzTOOCEqjRKzeCWLWQpj?usp=sharing) -## Try it out -I have focused on testing my ideas, and the project is currently a bit messy. My plan is to organize it in the coming period. However, feel free to clone the repo and try running this notebook: - -1. [Run Moatless Tools on any repository](notebooks/00_index_and_run.ipynb) - - -## How it works -The solution is based on an agentic loop that functions as a finite state machine, transitioning between states. Each state can have its own prompts and response handling. - -The following states are used in the usual workflow and code flow. - -### Search -The Search Loop uses function calling to find relevant code using the following parameters: - - * `query` - A query using natural language to describe the desired code. - * `code_snippet` - A specific code snippet that should be exactly matched. - * `class_name` - A specific class name to include in the search. - * `function_name` - A specific function name to include in the search. - * `file_pattern` - A glob pattern to filter search results to specific file types or directories. +### Version 0.0.1: GPT-4o +Moatless Tools 0.0.1 has a solve rate of 24%, with each benchmark instance costing an average of $0.13 to solve with GPT-4o. Running the SWE Bench Lite dataset with 300 instances costs approx 40 dollars. -For semantic search, a vector index is used, which is based on the llama index. This is a classic RAG solution where all code in the repository is chunked into relevant parts, such as at the method level, embedded, and indexed in a vector store. For class and function name search, a simple index is used where all function and class names are indexed. +[Try it out in Google Colab](https://colab.research.google.com/drive/15RpSjdprf9lcaP0oqKsuYfZl1c3kVB_t?usp=sharing) -### Identify -Identifies the code relevant to the task. If not all relevant code is found, it transitions back to Search. Once all relevant code is found, it transitions to PlanToCode. -### PlanToCode -Breaks down the request for code changes into smaller changes to specific parts (code spans) of the codebase. +# Try it out +I have focused on testing my ideas, and the project is currently a bit messy. My plan is to organize it in the coming period. However, feel free to clone the repo and try running this notebook: -### ClarifyChange -If the proposed changes affect too large a portion of the code, the change needs to be clarified to affect a smaller number of code lines. +1. [Run Moatless Tools on any repository](notebooks/00_index_and_run.ipynb) -### EditCode -Code is edited in search/replace blocks inspired by the edit block concept in [Aider](https://aider.chat/docs/benchmarks.html). In this concept, the LLM specifies the code to be changed in a search block and the code it will be changed to in a replace block. However, since the code to be changed is already known to the Code Loop, the search section is pre-filled, and the LLM only needs to respond with the replace section. 
The idea is that this reduces the risk of changing the wrong code by having an agreement on what to change before making the change. +## Environment Setup + +Before running the evaluation, you'll need: +1. At least one LLM provider API key (e.g., OpenAI, Anthropic, etc.) +2. A Voyage AI API key from [voyageai.com](https://voyageai.com) to use the pre-embedded vector stores for SWE-Bench instances. +3. (Optional) Access to a testbed environment - see [moatless-testbeds](https://github.com/aorwall/moatless-testbeds) for setup instructions + +You can configure these settings by either: + +1. Create a `.env` file in the project root (copy from `.env.example`): + + ```bash + cp .env.example .env + # Edit .env with your values + ``` + +2. Or export the variables directly: + + ```bash + # Directory for storing vector index store files + export INDEX_STORE_DIR="/tmp/index_store" + + # Directory for storing clonedrepositories + export REPO_DIR="/tmp/repos" + + # Required: At least one LLM provider API key + export OPENAI_API_KEY="" + export ANTHROPIC_API_KEY="" + export HUGGINGFACE_API_KEY="" + export DEEPSEEK_API_KEY="" + + # ...or Base URL for custom LLM API service (optional) + export CUSTOM_LLM_API_BASE="" + export CUSTOM_LLM_API_KEY="" + + # Required: API Key for Voyage Embeddings + export VOYAGE_API_KEY="" + + # Optional: Configuration for testbed environment (https://github.com/aorwall/moatless-testbeds) + export TESTBED_API_KEY="" + export TESTBED_BASE_URL="" + ``` + +## Example + +Basic setup using the `AgenticLoop` to solve a SWE-Bench instance. + +```python +from moatless.agent import ActionAgent +from moatless.agent.code_prompts import SIMPLE_CODE_PROMPT +from moatless.benchmark.swebench import create_repository +from moatless.benchmark.utils import get_moatless_instance +from moatless.completion import CompletionModel +from moatless.file_context import FileContext +from moatless.index import CodeIndex +from moatless.loop import AgenticLoop +from moatless.actions import FindClass, FindFunction, FindCodeSnippet, SemanticSearch, RequestMoreContext, RequestCodeChange, Finish, Reject + +index_store_dir = "/tmp/index_store" +repo_base_dir = "/tmp/repos" +persist_path = "trajectory.json" + +instance = get_moatless_instance("django__django-16379") + +completion_model = CompletionModel(model="gpt-4o", temperature=0.0) + +repository = create_repository(instance) + +code_index = CodeIndex.from_index_name( + instance["instance_id"], index_store_dir=index_store_dir, file_repo=repository +) + +actions = [ + FindClass(code_index=code_index, repository=repository), + FindFunction(code_index=code_index, repository=repository), + FindCodeSnippet(code_index=code_index, repository=repository), + SemanticSearch(code_index=code_index, repository=repository), + RequestMoreContext(repository=repository), + RequestCodeChange(repository=repository, completion_model=completion_model), + Finish(), + Reject() +] + +file_context = FileContext(repo=repository) +agent = ActionAgent(actions=actions, completion=completion_model, system_prompt=SIMPLE_CODE_PROMPT) + +loop = AgenticLoop.create( + message=instance["problem_statement"], + agent=agent, + file_context=file_context, + repository=repository, + persist_path=persist_path, + max_iterations=50, + max_cost=2.0 # Optional: Set maximum cost in dollars +) + +final_node = loop.run() +if final_node: + print(final_node.observation.message) +``` diff --git a/moatless/__init__.py b/moatless/__init__.py index bdf45ec7..61b68429 100644 --- a/moatless/__init__.py +++ 
b/moatless/__init__.py @@ -1,4 +1 @@ -from moatless.repository import FileRepository -from moatless.workspace import Workspace -from moatless.transition_rules import TransitionRules -from moatless.loop import AgenticLoop +# from moatless.loop import AgenticLoop, TransitionRules diff --git a/moatless/actions/__init__.py b/moatless/actions/__init__.py new file mode 100644 index 00000000..ae539c8e --- /dev/null +++ b/moatless/actions/__init__.py @@ -0,0 +1,9 @@ +from moatless.actions.code_change import RequestCodeChange +from moatless.actions.find_class import FindClass +from moatless.actions.find_code_snippet import FindCodeSnippet +from moatless.actions.find_function import FindFunction +from moatless.actions.finish import Finish +from moatless.actions.reject import Reject +from moatless.actions.run_tests import RunTests +from moatless.actions.semantic_search import SemanticSearch +from moatless.actions.view_code import ViewCode diff --git a/moatless/actions/action.py b/moatless/actions/action.py new file mode 100644 index 00000000..68738aee --- /dev/null +++ b/moatless/actions/action.py @@ -0,0 +1,143 @@ +import importlib +import logging +import pkgutil +from abc import ABC +from typing import List, Type, Tuple, Any, Dict, Optional, ClassVar + +from pydantic import BaseModel, ConfigDict + +from moatless.actions.model import ( + ActionArguments, + Observation, + FewShotExample, +) +from moatless.file_context import FileContext +from moatless.index import CodeIndex +from moatless.repository.repository import Repository + +logger = logging.getLogger(__name__) + +_actions: Dict[str, Type["Action"]] = {} + + +class Action(BaseModel, ABC): + args_schema: ClassVar[Type[ActionArguments]] + + model_config = ConfigDict(arbitrary_types_allowed=True) + + def __init__(self, **data): + super().__init__(**data) + + def execute(self, args: ActionArguments, file_context: FileContext) -> Observation: + """ + Execute the action. + """ + + message = self._execute(file_context=file_context) + return Observation.create(message) + + def _execute(self, file_context: FileContext) -> str | None: + """ + Execute the action and return the updated FileContext. + """ + raise NotImplementedError("Subclasses must implement this method.") + + @property + def name(self) -> str: + return self.__class__.__name__ + + + @classmethod + def from_dict( + cls, + obj: dict, + repository: Repository = None, + runtime: Any = None, + code_index: CodeIndex = None, + ) -> "Action": + obj = obj.copy() + obj.pop("args_schema", None) + action_class_path = obj.pop("action_class", None) + + if action_class_path: + module_name, class_name = action_class_path.rsplit(".", 1) + module = importlib.import_module(module_name) + action_class = getattr(module, class_name) + + if repository and hasattr(action_class, "_repository"): + obj["repository"] = repository + + if code_index and hasattr(action_class, "_code_index"): + obj["code_index"] = code_index + + if runtime and hasattr(action_class, "_runtime"): + obj["runtime"] = runtime + + return action_class.model_validate(obj) + + raise ValueError(f"Unknown action: {obj}") + + @classmethod + def model_validate(cls, obj: Any) -> "Action": + return cls(**obj) + + @classmethod + def get_action_by_args_class( + cls, args_class: Type[ActionArguments] + ) -> Optional[Type["Action"]]: + """ + Get the Action subclass corresponding to the given ActionArguments subclass. + + Args: + args_class: The ActionArguments subclass to look up. + + Returns: + The Action subclass if found, None otherwise. 
+ """ + + def search_subclasses(current_class): + if ( + hasattr(current_class, "args_schema") + and current_class.args_schema == args_class + ): + return current_class + for subclass in current_class.__subclasses__(): + result = search_subclasses(subclass) + if result: + return result + return None + + return search_subclasses(cls) + + @classmethod + def get_action_by_name(cls, action_name: str) -> Type["Action"]: + """ + Dynamically import and return the appropriate Action class for the given action name. + """ + if not _actions: + cls._load_actions() + + action = _actions.get(action_name) + if action: + return action + + raise ValueError(f"Unknown action: {action_name}") + + @classmethod + def _load_actions(cls): + actions_package = importlib.import_module("moatless.actions") + + for _, module_name, _ in pkgutil.iter_modules(actions_package.__path__): + full_module_name = f"moatless.actions.{module_name}" + module = importlib.import_module(full_module_name) + for name, obj in module.__dict__.items(): + if isinstance(obj, type) and issubclass(obj, Action) and obj != Action: + _actions[name] = obj + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + """ + Returns a list of few-shot examples specific to this action. + Override this method in subclasses to provide custom examples. + """ + return [] diff --git a/moatless/actions/apply_change_and_test.py b/moatless/actions/apply_change_and_test.py new file mode 100644 index 00000000..3743413b --- /dev/null +++ b/moatless/actions/apply_change_and_test.py @@ -0,0 +1,124 @@ +import logging +from enum import Enum +from typing import List, Any + +from pydantic import Field, PrivateAttr, model_validator + +from moatless.actions import RequestCodeChange, RunTests +from moatless.actions.model import ActionArguments, Observation +from moatless.actions.run_tests import RunTestsArgs +from moatless.completion.completion import CompletionModel +from moatless.file_context import FileContext +from moatless.index import CodeIndex +from moatless.repository.repository import Repository +from moatless.runtime.runtime import RuntimeEnvironment + +logger = logging.getLogger(__name__) + + +class ChangeType(str, Enum): + addition = "addition" + modification = "modification" + deletion = "deletion" + + +class RequestCodeChangeArgs(ActionArguments): + """ + Apply a code change through an AI agent. This action instructs an AI assistant to + modify code based on provided instructions and pseudo-code. The AI will analyze the existing code within + the specified line range and apply changes while maintaining proper syntax, indentation, and context. + + After the change has been applied, relevant tests will be run. + """ + + file_path: str = Field(..., description="The file path of the code to be updated.") + instructions: str = Field( + ..., + description="Natural language instructions for the AI assistant describing the required code changes.", + ) + pseudo_code: str = Field( + ..., + description="Example code snippet illustrating the desired changes. The AI will use this as a reference for implementing the modifications.", + ) + change_type: ChangeType = Field( + ..., + description="Type of change to perform: 'addition' (insert new code), 'modification' (update existing code), or 'deletion' (remove code).", + ) + start_line: int = Field( + ..., + description="The line number where the code change should begin. 
For additions, specifies the insertion point.", + ) + end_line: int = Field( + ..., + description="The line number where the code change should end. For additions, specifies the insertion point.", + ) + + class Config: + title = "RequestCodeChange" + + @model_validator(mode="before") + @classmethod + def set_missing_end_line(cls, data: Any) -> Any: + if isinstance(data, dict): + if not data.get("end_line"): + data["end_line"] = data["start_line"] + + return data + + def equals(self, other: "RequestCodeChangeArgs") -> bool: + if not isinstance(other, RequestCodeChangeArgs): + return False + + return ( + self.file_path == other.file_path + and self.pseudo_code == other.pseudo_code + and self.change_type == other.change_type + and self.start_line == other.start_line + and self.end_line == other.end_line + ) + + +class ApplyCodeChangeAndTest(RequestCodeChange): + _runtime: RuntimeEnvironment = PrivateAttr() + _code_index: CodeIndex = PrivateAttr() + + def __init__( + self, + repository: Repository | None = None, + completion_model: CompletionModel | None = None, + runtime: RuntimeEnvironment | None = None, + code_index: CodeIndex | None = None, + **data, + ): + super().__init__( + repository=repository, completion_model=completion_model, **data + ) + self._runtime = runtime + self._code_index = code_index + + def execute( + self, args: RequestCodeChangeArgs, file_context: FileContext + ) -> Observation: + observation = super().execute(args, file_context) + + if not observation.properties or not observation.properties.get("diff"): + return observation + + run_tests = RunTests( + repository=self._repository, + runtime=self._runtime, + code_index=self._code_index, + ) + test_observation = run_tests.execute( + RunTestsArgs( + scratch_pad=args.scratch_pad, + test_files=[args.file_path], + ), + file_context, + ) + + observation.properties.update(test_observation.properties) + observation.message += "\n\n" + test_observation.message + + return observation + diff --git a/moatless/actions/code_change.py b/moatless/actions/code_change.py new file mode 100644 index 00000000..852532f5 --- /dev/null +++ b/moatless/actions/code_change.py @@ -0,0 +1,1000 @@ +import logging +from enum import Enum +from typing import Optional, List, Union, Tuple, Any, Type, ClassVar + +from pydantic import Field, PrivateAttr, model_validator + +from moatless.actions.action import Action +from moatless.actions.model import ( + ActionArguments, + FewShotExample, + Observation, +) +from moatless.codeblocks import CodeBlock, get_parser_by_path, PythonParser +from moatless.codeblocks.codeblocks import CodeBlockTypeGroup, CodeBlockType +from moatless.codeblocks.module import Module +from moatless.completion.completion import CompletionModel +from moatless.completion.model import AssistantMessage, UserMessage, Completion +from moatless.file_context import FileContext, ContextFile +from moatless.repository.file import do_diff, remove_duplicate_lines +from moatless.repository.repository import Repository +from moatless.utils.tokenizer import count_tokens + +logger = logging.getLogger(__name__) + +ROLE_PROMPT = "You are an autonomous AI assistant with superior programming skills." + +SEARCH_REPLACE_PROMPT = """# Objective: +Your task is to update the code within the `<search>` tags based on the provided `<instructions>` and `<pseudo_code>`. Follow these rules meticulously: + +1. **Understanding Instructions and Pseudo Code:** + - **Instructions:** Describe the specific changes that need to be made to the code.
+ - **Pseudo Code:** Provides a code snippet illustrating the proposed modification or addition. It serves as a guide for how to implement the changes. + - **Full Replacement:** Use the pseudo code and instructions to **completely replace** the entire content within the `<search>` tags. The `<pseudo_code>` may address only a subset of the block, but your replacement should ensure that the entire block reflects the necessary updates. + +2. **Update Rules:** + - **Implement Changes Fully:** Integrate all changes as specified in `<instructions>` and `<pseudo_code>`, ensuring the entire `<search>` block is updated accordingly. + - **No Additional Changes:** Do not modify any part of the code outside the `<search>` tags or make changes not explicitly requested. + +3. **Formatting and Indentation:** + - **CRITICAL: Preserve Exact Indentation:** Maintain the EXACT indentation level of the original code within the `<search>` tags, including the very first line. Do not add any extra indentation to the entire block. + - **Consistent Formatting:** Ensure that the formatting (spaces, line breaks) matches the original code structure precisely. + +4. **Comments and Placeholders:** + - **Retain Existing Comments:** Keep any existing placeholder comments (e.g., `# ... other code`) intact within the updated block. + - **No New Comments:** Do not add comments describing your changes. + +5. **Response Formatting:** + - **Replacement Code:** Return the **entire updated code block** within `<replace>` tags, reflecting all necessary modifications. + - **Empty Replacement:** If all code within `<search>` should be removed, return empty `<replace>` tags. + - **Rejection:** If unable to make the changes or if instructions are unclear/incorrect, use `<reject>` tags with a clear reason. + +# Response Format: + +**Successful Replacement:** +<replace> +[Entire updated code block with all modifications applied, maintaining original indentation] +</replace> + +**Empty Replacement:** +<replace> +</replace> + +**Rejection:** +<reject> +[Reason for rejection] +</reject> + +# IMPORTANT: + + * Do not include any code outside the <replace> tags. + * Ensure the indentation matches EXACTLY with the original code inside the <search> tags, including the first line. + * Completely replace the entire <search> block with the updated code based on the instructions and pseudo code. + * Do not add or remove any lines unless instructed. + * Double-check that the first line of your <replace> block has the same indentation as the first line of the <search> block. +""" + + +class ChangeType(str, Enum): + addition = "addition" + modification = "modification" + deletion = "deletion" + + +class RequestCodeChangeArgs(ActionArguments): + """ + Apply a code change through an AI agent. This action instructs an AI assistant to + modify code based on provided instructions and pseudo-code. The AI will analyze the existing code within + the specified line range and apply changes while maintaining proper syntax, indentation, and context. + """ + + file_path: str = Field(..., description="The file path of the code to be updated.") + instructions: str = Field( + ..., + description="Natural language instructions for the AI assistant describing the required code changes.", + ) + pseudo_code: str = Field( + ..., + description="Example code snippet illustrating the desired changes. The AI will use this as a reference for implementing the modifications.", + ) + change_type: ChangeType = Field( + ..., + description="Type of change to perform: 'addition' (insert new code), 'modification' (update existing code), or 'deletion' (remove code).", + ) + start_line: int = Field( + ..., + description="The line number where the code change should begin.
For additions, specifies the insertion point.", + ) + end_line: int = Field( + ..., + description="The line number where the code change should end. For additions, specifies the insertion point.", + ) + + class Config: + title = "RequestCodeChange" + + @model_validator(mode="before") + @classmethod + def set_missing_end_line(cls, data: Any) -> Any: + if isinstance(data, dict): + if not data.get("end_line") and data.get("start_line"): + data["end_line"] = data["start_line"] + + return data + + def equals(self, other: "RequestCodeChangeArgs") -> bool: + if not isinstance(other, RequestCodeChangeArgs): + return False + + return ( + self.file_path == other.file_path + and self.pseudo_code == other.pseudo_code + and self.change_type == other.change_type + and self.start_line == other.start_line + and self.end_line == other.end_line + ) + + +class RequestCodeChange(Action): + args_schema: ClassVar[Type[ActionArguments]] = RequestCodeChangeArgs + + max_tokens_in_edit_prompt: int = Field( + default=750, + description="The maximum number of tokens allowed in the edit prompt.", + ) + show_file_context: bool = Field( + default=True, description="Whether to show the file context in the prompt." + ) + + _repository: Repository = PrivateAttr() + _completion_model: CompletionModel = PrivateAttr() + + def __init__( + self, + repository: Repository | None = None, + completion_model: CompletionModel | None = None, + **data, + ): + super().__init__(**data) + self._repository = repository + self._completion_model = completion_model + + def execute( + self, args: RequestCodeChangeArgs, file_context: FileContext + ) -> Observation: + logger.info( + f"RequestCodeChange: file_path={args.file_path}, start_line={args.start_line}, end_line={args.end_line}, change_type={args.change_type}" + ) + + if not args.instructions: + return Observation( + message="Please provide instructions for the code change.", + properties={"fail_reason": "no_instructions"}, + expect_correction=True, + ) + + if not args.pseudo_code: + return Observation( + message="Please provide pseudo code for the code change.", + properties={"fail_reason": "no_pseudo_code"}, + expect_correction=True, + ) + + if not args.file_path.endswith(".py"): + return Observation( + message="Only Python files can be edited. Please provide a file path to a Python file.", + properties={"fail_reason": "not_python_file"}, + expect_correction=True, + ) + + if args.change_type != ChangeType.addition and not self._repository.file_exists( + args.file_path + ): + return Observation( + message=f"File {args.file_path} not found.", + properties={"fail_reason": "file_not_found"}, + ) + + if self._repository.is_directory(args.file_path): + return Observation( + message=f"{args.file_path} is a directory. Please provide a file path.", + properties={"fail_reason": "is_directory"}, + ) + + if ( + not file_context.has_file(args.file_path) + and args.change_type != ChangeType.addition + ): + context_file = file_context.get_file(args.file_path) + message = f"File {args.file_path} is not in context." + if context_file.module: + message += f"At least one span must be added. Use RequestMoreContext to one ore more of the available spans: {self.span_id_list(context_file.module.span_ids)}" + + return Observation( + message=message, + properties={"fail_reason": "file_not_in_context"}, + ) + + context_file = file_context.get_file(args.file_path) + if not context_file: + logger.info( + f"File {args.file_path} is not found in the file repository. Will create it and add to context." 
+ ) + + context_file = file_context.add_file(args.file_path) + updated_content = args.pseudo_code + return self._apply_changes(context_file, updated_content, args.file_path) + else: + # TODO: Verify if the code span is in context + # TODO: Check for hallucinations + retry_message = self.verify_request(context_file, args) + if retry_message: + extra = context_file.to_prompt( + show_line_numbers=True, + show_span_ids=True, + exclude_comments=False, + show_outcommented_code=False, + outcomment_code_comment="... other code", + ) + return Observation( + message=retry_message, extra=extra, expect_correction=True + ) + + completion = None + replace_block, args = self._get_replace_block_from_pseudo_code( + args, context_file + ) + if not replace_block: + if context_file.module: + args.start_line, args.end_line, args.change_type = ( + self.get_line_span( + args.change_type, + context_file, + args.start_line, + args.end_line, + self.max_tokens_in_edit_prompt, + ) + ) + + response, completion = self._generate_replace_block( + context_file, args, args.start_line, args.end_line + ) + + if "<reject>" in response: + rejection_message = response.split("<reject>")[1].split( + "</reject>" + )[0] + logger.info( + f"Rejected the instructions. Reason: {rejection_message}" + ) + return Observation( + message=f"Failed to apply changes using search/replace blocks. {rejection_message}", + properties={"fail_reason": "rejected"}, + execution_completion=completion, + ) + + replace_block = response.split("<replace>")[1].split("</replace>")[0] + + observation = self._update_content( + context_file, + replace_block, + args.start_line, + args.end_line, + args.change_type, + ) + + observation.execution_completion = completion + return observation + + def create_replacement_block( + self, messages: List[Union[UserMessage, AssistantMessage]] + ) -> Tuple[str, Any]: + try: + replace_code, completion = self._completion_model.create_text_completion( + messages=messages, + system_prompt=self._system_prompt(), + ) + + return replace_code, completion + except Exception as e: + logger.exception(f"Error applying change. 
Retrying...") + raise e + + def _system_prompt(self) -> str: + system_prompt = ROLE_PROMPT + + system_prompt += "\n\n" + system_prompt += SEARCH_REPLACE_PROMPT + + return system_prompt + + def _generate_replace_block( + self, + context_file: ContextFile, + args: RequestCodeChangeArgs, + start_line: int, + end_line: int, + ) -> Tuple[str, Completion]: + span_ids = [] + span_to_update = context_file.module.find_spans_by_line_numbers( + start_line, end_line + ) + if span_to_update: + # Pin the spans that are planned to be updated to context + for span in span_to_update: + if span.span_id not in span_ids: + span_ids.append(span.span_id) + context_file.add_spans(span_ids=set(span_ids), pinned=True) + + logger.info( + f"Requesting code change in {args.file_path} from {start_line} to {end_line}" + ) + + messages = [] + search_block = self.create_search_block( + context_file, start_line, end_line, args.change_type + ) + + user_message = self.create_message( + context_file, + search_block, + start_line, + end_line, + args.instructions, + args.pseudo_code, + ) + + messages.append(UserMessage(content=user_message)) + response, completion = self._completion_model.create_text_completion( + messages=messages, + system_prompt=self._system_prompt(), + ) + return response, completion + + def _update_content( + self, + context_file: ContextFile, + replace_block: str, + start_line: int, + end_line: int | None, + change_type: ChangeType, + ) -> Observation: + if replace_block: + updated_content = self._update_content_by_line_numbers( + context_file, start_line - 1, end_line, replace_block + ) + + updated_module = self._parse_module(context_file, updated_content) + if not updated_module: + invalid_response = "Code is invalid." + invalid_reason = "invalid_syntax" + else: + indentation_fix = self._check_indentation( + context_file, updated_module, start_line, end_line + ) + if indentation_fix: + replace_block = self._apply_indentation_fix( + replace_block, indentation_fix + ) + updated_content = self._update_content_by_line_numbers( + context_file, start_line - 1, end_line, replace_block + ) + updated_module = self._parse_module(context_file, updated_content) + + invalid_response, invalid_reason = self._verify_change( + updated_module, context_file, start_line, end_line, change_type + ) + if not invalid_response: + output = self._apply_changes( + context_file, updated_content, context_file.file_path + ) + return output + + else: + invalid_response = "The code in the replace tag is empty." + invalid_reason = "empty_replace_tag" + + logger.warning(f"Failed to apply changes. Reason: {invalid_response}") + return Observation( + message=f"Failed to apply changes using search/replace blocks. Reason: {invalid_response}" + f"Verify that the right lines are provided and that the code that should changed is in the context.", + properties={"fail_reason": invalid_reason}, + ) + + def _parse_pseudo_code_block(self, args: RequestCodeChangeArgs): + try: + parser = PythonParser(apply_gpt_tweaks=True) + return parser.parse(args.pseudo_code, file_path=args.file_path) + except Exception as e: + logger.warning( + f"Failed to parse pseudo code with error {e}. Pseudo code:\n{args.pseudo_code}" + ) + return None + + def create_message( + self, + file: ContextFile, + search_block: str, + start_line: int, + end_line: int, + instructions: str, + pseudo_code: str, + ) -> str: + content = "" + + # TODO: Be able to include intial problem statement? 
+ # if self.show_initial_message: + # content = f"\n{self.initial_message}\n\n\n" + + if self.show_file_context: + file_context = FileContext(repo=self._repository, max_tokens=3000) + file_context.add_line_span_to_context(file.file_path, start_line, end_line) + # file_context.expand_context_with_related_spans(self.max_prompt_file_tokens) + + file_context_str = file_context.create_prompt( + show_line_numbers=True, + show_span_ids=False, + exclude_comments=False, + show_outcommented_code=False, + outcomment_code_comment="... other code", + ) + + content += f"\n<file_context>\n{file_context_str}\n</file_context>\n" + + content += f"\n<instructions>\n{instructions}\n</instructions>\n" + + if pseudo_code: + content += f"\n<pseudo_code>\n{pseudo_code}\n</pseudo_code>\n" + + if file: + content += f"<search>\n{search_block}\n</search>\n" + if self.show_file_context: + content += f"\nCode found on line numbers {start_line} to {end_line} in {file.file_path}:\n" + else: + content += "\n<search>\n# No content...\n</search>\n" + + return content + + def create_search_block( + self, file: ContextFile, start_line: int, end_line: int, change_type: ChangeType + ): + code_lines = file.content.split("\n") + lines_to_replace = code_lines[start_line - 1 : end_line] + code_to_replace = "\n".join(lines_to_replace) + if not code_to_replace and change_type != ChangeType.addition: + logger.warning( + f"No code found to replace in {file.file_path} from line {start_line} to {end_line}." + ) + return code_to_replace + + def verify_request( + self, context_file: ContextFile, args: RequestCodeChangeArgs + ) -> Optional[str]: + if not args.start_line: + message = "You must specify the start line and end line of the code change in the variables start_line and end_line. If you want to update the first line in the file, set start line to 1. If you believe that the lines you want to edit aren't in the file context, you can request more context by providing the file path and the line numbers or span ids to the RequestMoreContext function." + return message + + if not args.end_line: + if args.change_type != ChangeType.addition: + return f"If your intention is to modify an existing code span you must provide the end line for the code change in end_line." + + logger.info(f"End line not set, set to start line {args.start_line}") + args.end_line = args.start_line + + pseudo_code_block = self._parse_pseudo_code_block(args) + existing_hallucinated_spans = self.find_hallucinated_spans( + pseudo_code_block, context_file, args.start_line, args.end_line + ) + if existing_hallucinated_spans: + context_file.add_spans(existing_hallucinated_spans) + return f"""There was code in the pseudo code that wasn't present in the file context. +The following code spans were added to the file context: {', '.join(existing_hallucinated_spans)}. +Please provide instructions for the code change again.""" + + # Verify that the code that is supposed to be changed is in the context + if context_file.module and args.change_type != ChangeType.addition: + code_block = self.find_smallest_covering_block( + context_file.module, args.start_line, args.end_line + ) + if ( + code_block + and code_block.belongs_to_span + and code_block.belongs_to_span.span_id not in context_file.span_ids + ): + return f"The code span {code_block.belongs_to_span.span_id} between lines {args.start_line} - {args.end_line} is not in the context. Please use RequestMoreContext to add the correct line numbers or span ids to context."
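+ # The verification below extracts the targeted lines, checks whether the pseudo code already exists in the file (i.e. the change is already implemented), and rejects spans larger than max_tokens_in_edit_prompt.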
+ # TODO: Handle if no code block is found + + code_lines = context_file.content.split("\n") + lines_to_edit = code_lines[args.start_line - 1 : args.end_line + 1] + code_to_edit = "\n".join(lines_to_edit) + + # Verify if change has already been implemented + existing_content = code_to_edit.expandtabs() + new_content = args.pseudo_code.expandtabs() + occurrences = existing_content.count(new_content) + if occurrences: + return f"The pseudo code does already exist in the existing code base indicating that the change has already been implemented. Existing content at lines {args.start_line} : {args.end_line}\n{existing_content}" + + tokens = count_tokens(code_to_edit) + if tokens > self.max_tokens_in_edit_prompt: + clarify_msg = ( + f"The code span between lines {args.start_line} - {args.end_line} has {tokens} tokens, which is higher than the " + f"maximum allowed {self.max_tokens_in_edit_prompt} tokens. " + ) + logger.info(f"{clarify_msg}. Ask for clarification.") + return f"The change request was rejected! {clarify_msg}. Narrow down the instructions and specify the exact part of the code that needs to be updated to fulfill the change. " + + return None + + def _get_replace_block_from_pseudo_code( + self, args: RequestCodeChangeArgs, context_file: ContextFile + ) -> Tuple[Optional[str], RequestCodeChangeArgs]: + pseudo_code_block = self._parse_pseudo_code_block(args) + if not pseudo_code_block: + return None, args + + if ( + len(pseudo_code_block.has_placeholders()) > 0 + or len(pseudo_code_block.children) > 1 + or pseudo_code_block.children[0].type.group != CodeBlockTypeGroup.STRUCTURE + ): + return None, args + + pseudo_code_block = pseudo_code_block.children[0] + + existing_block = None + if args.change_type == ChangeType.modification: + block_to_replace = self._get_block_to_replace( + context_file, args.start_line, args.end_line + ) + if ( + block_to_replace + and block_to_replace.type == pseudo_code_block.type + and block_to_replace.identifier == block_to_replace.identifier + ): + logger.info( + f"Found existing block {block_to_replace.path_string()} at start line {args.start_line} to be replaced by the pseudo code." + ) + existing_block = block_to_replace + elif args.change_type == ChangeType.addition: + block_at_start_line = context_file.module.find_first_by_start_line( + args.start_line + ) + if block_at_start_line: + if block_at_start_line.type.group == CodeBlockTypeGroup.STRUCTURE: + logger.info( + f"Found existing block {block_at_start_line.path_string()} at start line {args.start_line}. Add the new block after this block on line {block_at_start_line.end_line}." + ) + args.start_line = block_at_start_line.end_line + 1 + args.end_line = args.start_line + existing_block = block_at_start_line + else: + logger.warning( + f"Existing block {block_at_start_line.path_string()} at start line {args.start_line} has a different type than the pseudo code block {pseudo_code_block.path_string()}." + ) + else: + structure_block = self.find_smallest_covering_block( + context_file.module, args.start_line, args.start_line + ) + + if structure_block and structure_block.type in [ + CodeBlockType.CLASS, + CodeBlockType.MODULE, + ]: + existing_blocks = structure_block.find_blocks_with_type( + pseudo_code_block.type + ) + if existing_blocks: + # TODO: Pick the closest one + existing_block = existing_blocks[0] + else: + logger.warning( + f"No existing block of type {pseudo_code_block.type.display_name} found in {structure_block.path_string()}." 
+ ) + + if existing_block: + indentation_diff = existing_block.compare_indentation(pseudo_code_block) + if indentation_diff: + replace_block = self._apply_indentation_fix( + args.pseudo_code, indentation_diff + ) + else: + replace_block = args.pseudo_code + + replace_block = "\n" + replace_block + "\n" + return replace_block, args + else: + return None, args + + def _apply_changes( + self, file: ContextFile, updated_content: str, file_path: str + ) -> Observation: + diff = do_diff(file_path, file.content, updated_content) + + if file.module: + existing_span_ids = file.module.get_all_span_ids() + + if not diff: + logger.info(f"No changes in {file_path}.") + return Observation( + message="Code wasn't updated, was the request code change the same as the existing code?", + properties={"fail_reason": "no_changes"}, + ) + + file.apply_changes(updated_content) + + return Observation( + message=f"Applied the change to {file_path}\n\n```diff\n{diff}\n```", + properties={"diff": diff}, + ) + + def _update_content_by_line_numbers( + self, + file: ContextFile, + start_line_index: int, + end_line_index: int, + replacement_content: str, + ) -> str: + replacement_lines = replacement_content.split("\n") + while replacement_lines and replacement_lines[0].strip() == "": + replacement_lines.pop(0) + while replacement_lines and replacement_lines[-1].strip() == "": + replacement_lines.pop() + + original_lines = file.content.split("\n") + replacement_lines = remove_duplicate_lines( + replacement_lines, original_lines[end_line_index:] + ) + updated_lines = ( + original_lines[:start_line_index] + + replacement_lines + + original_lines[end_line_index:] + ) + return "\n".join(updated_lines) + + def _parse_module(self, file: ContextFile, updated_content: str) -> Module | None: + parser = get_parser_by_path(file.file_path) + if not parser: + raise ValueError(f"Parser not found for {file.file_path}") + + try: + return parser.parse(updated_content) + except Exception as e: + logger.warning( + f"Failed to parse updated content in {file.file_path}: {e}. Content:\n{updated_content}" + ) + return None + + def _verify_change( + self, + updated_module: Module, + file: ContextFile, + start_line: int, + end_line: int, + change_type: Optional[ChangeType], + ) -> Tuple[str, str]: + existing_placeholders = file.module.find_blocks_with_type( + CodeBlockType.COMMENTED_OUT_CODE + ) + new_placeholders = ( + updated_module.find_blocks_with_type(CodeBlockType.COMMENTED_OUT_CODE) + if not existing_placeholders + else [] + ) + + if new_placeholders: + error_response = "" + for new_placeholder in new_placeholders: + parent_block = new_placeholder.find_type_group_in_parents( + CodeBlockTypeGroup.STRUCTURE + ) + if parent_block and parent_block.type != CodeBlockType.MODULE: + error_response += f"{parent_block.identifier} has a placeholder `{new_placeholder.content}` indicating that it's not fully implemented." + else: + error_response += f"There is a placeholder in the replace block indicating that it's not fully implemented. : \n```{new_placeholder.to_string()}\n```. 
\n" + return error_response, "placeholders" + + if change_type == ChangeType.modification: + existing_block = self._get_block_to_replace(file, start_line, end_line) + if existing_block: + new_block = updated_module.find_first_by_start_line(start_line) + if existing_block.indentation != new_block.indentation: + return ( + f"The code in the tag has an indentation of {len(new_block.indentation)} spaces while the code in the tag has {len(existing_block.indentation)} spaces.", + "indentation", + ) + + block_in_updated_code = file.module.find_by_path( + existing_block.full_path() + ) + if existing_block.type != new_block.type and not ( + block_in_updated_code + or block_in_updated_code.type != existing_block.type + ): + return ( + f"The code block {existing_block.identifier} in the tag with the type {existing_block.type.display_name} was expected to be replaced. But the code provided in the tag has the type {new_block.type.display_name}.", + "block_type", + ) + + return None, None + + def _get_block_to_replace(self, file: ContextFile, start_line: int, end_line: int): + code_block = file.module.find_first_by_start_line(start_line) + if ( + code_block + and code_block.start_line == start_line + and code_block.end_line == end_line + and code_block.type.group == CodeBlockTypeGroup.STRUCTURE + ): + return code_block + return None + + def find_hallucinated_spans( + self, + code_block: CodeBlock, + context_file: ContextFile, + start_line: int, + end_line: int, + ) -> set[str]: + """ + Find out if the suggested code block contains any identifiers that are not present in the context. + """ + + existing_hallucinated_spans = set() + for child_block in code_block.children: + # Only verify structure blocks like classed and functions + if child_block.type.group != CodeBlockTypeGroup.STRUCTURE: + continue + + if child_block.type == CodeBlockType.CLASS: + existing_hallucinated_spans.update( + self.find_hallucinated_spans( + child_block, context_file, start_line, end_line + ) + ) + + # Check if the pseudo code identifier is part of any existing span_id + if any( + child_block.identifier in span_id for span_id in context_file.span_ids + ): + continue + + span_id = child_block.belongs_to_span.span_id + existing_block = context_file.module.find_first_by_span_id( + child_block.belongs_to_span.span_id + ) + if existing_block: + logger.info( + f"Checking if {span_id} is in context. Found {existing_block}" + ) + existing_hallucinated_spans.add(span_id) + else: + if "." not in span_id: + # Check if there is child blocks with the span_id as identifier + child_blocks = context_file.module.find_blocks_with_identifier( + span_id + ) + + for child_block in child_blocks: + if context_file.has_span(child_block.belongs_to_span.span_id): + continue + + parent_block = child_block.find_type_group_in_parents( + CodeBlockTypeGroup.STRUCTURE + ) + if ( + parent_block + and parent_block.type + in [CodeBlockType.CLASS, CodeBlockType.FUNCTION] + and parent_block.has_lines(start_line, end_line) + ) or child_block.is_within_lines(start_line, end_line): + logger.info( + f"Found child block {child_block.identifier} with {child_block.belongs_to_span.span_id} of {span_id} in context." 
+ ) + existing_hallucinated_spans.add( + child_block.belongs_to_span.span_id + ) + + return existing_hallucinated_spans + + def find_smallest_covering_block( + self, code_block: CodeBlock, start_line: int, end_line: int + ) -> Optional[CodeBlock]: + # If the code_block doesn't cover the lines, return None + if code_block.start_line > start_line or code_block.end_line < end_line: + return None + + # Check if any child block covers the lines + for child in code_block.children: + if child.start_line <= start_line and child.end_line >= end_line: + # Found a smaller block that covers the lines + smaller_block = self.find_smallest_covering_block( + child, start_line, end_line + ) + + if child.type.group == CodeBlockTypeGroup.STRUCTURE: + return smaller_block or child + + # No smaller block found, return the current block + return code_block + + def find_lines_within_blocks( + self, code_block: CodeBlock, start_line: int, end_line: int + ) -> List[int]: + # Collect lines from code blocks within max_tokens + lines = [] + + def traverse_blocks(block: CodeBlock): + if block.end_line < start_line or block.start_line > end_line: + return + + for child in block.children: + traverse_blocks(child) + + # It's a code block within the line range + if block.start_line >= start_line and block.end_line <= end_line: + lines.extend(range(block.start_line, block.end_line + 1)) + elif ( + not block.children + and block.end_line >= start_line + and block.start_line <= end_line + ): + lines.extend(range(block.start_line, block.end_line + 1)) + + traverse_blocks(code_block) + return sorted(set(lines)) + + def get_line_span( + self, + change_type: ChangeType, + file: ContextFile, + start_line: int, + end_line: int, + max_tokens: int, + ) -> tuple[Optional[int], Optional[int], Optional[ChangeType]]: + if not end_line: + end_line = start_line + + structure_block = self.find_smallest_covering_block( + file.module, start_line, end_line + ) + if structure_block: + logger.info( + f"Found smallest covering block {structure_block.display_name} (start_line: {structure_block.start_line}, end_line: {structure_block.end_line}, tokens: {structure_block.sum_tokens()})" + ) + + if structure_block.type == CodeBlockType.CLASS: + class_start_line, init_end_line, tokens = self.get_class_init_span( + structure_block + ) + + if ( + class_start_line <= start_line <= end_line <= init_end_line + and tokens < max_tokens + ): + logger.info( + f"Return class init block {structure_block.display_name} (start_line: {class_start_line}, end_line: {init_end_line}, tokens: {tokens})" + ) + return class_start_line, init_end_line, change_type + + if structure_block.sum_tokens() < max_tokens: + logger.info( + f"Return block {structure_block.display_name} (start_line: {structure_block.start_line}, end_line: {structure_block.end_line}, tokens: {structure_block.sum_tokens()}" + ) + + return structure_block.start_line, structure_block.end_line, change_type + + lines = self.find_lines_within_blocks( + file.module, max(0, start_line - 5), min(file.module.end_line, end_line + 5) + ) + if lines and len(lines) > 1: + logger.info( + f"Could not find start and end block for lines {start_line}-{end_line}. Return {lines[0]}-{lines[-1]}" + ) + return lines[0], lines[-1], change_type + else: + logger.info( + f"Could not find any lines within blocks for lines {start_line}-{end_line}. Returning original start and end lines." 
+ ) + return start_line, end_line, change_type + + def get_class_init_span(self, class_block: CodeBlock): + """ + Get end line of the class initation span by including all lines until the first function or class + """ + end_line = class_block.start_line + len(class_block.content_lines) - 1 + tokens = class_block.tokens + for child in class_block.children: + if ( + child.type.group == CodeBlockTypeGroup.STRUCTURE + and child.type != CodeBlockType.CONSTRUCTOR + ): + break + + end_line = child.end_line + tokens += child.tokens + + return class_block.start_line, end_line, tokens + + def _check_indentation( + self, + context_file: ContextFile, + updated_module: Module, + start_line: int, + end_line: int, + ) -> Optional[int]: + existing_block = self._get_block_to_replace(context_file, start_line, end_line) + if existing_block: + new_block = updated_module.find_first_by_start_line(start_line) + indentation_diff = existing_block.compare_indentation(new_block) + if ( + indentation_diff != 0 + and new_block.identifier == existing_block.identifier + ): + logger.info( + f"Indentation difference detected: {indentation_diff} spaces on updated block {existing_block.identifier}" + ) + return indentation_diff + + return None + + def _apply_indentation_fix(self, content: str, indentation_diff: int) -> str: + lines = content.split("\n") + if indentation_diff > 0: + return "\n".join(" " * indentation_diff + line for line in lines) + else: + return "\n".join(line[-indentation_diff:] for line in lines) + + def span_id_list(self, span_ids: set[str]) -> str: + list_str = "" + for span_id in span_ids: + list_str += f" * {span_id}\n" + return list_str + + def model_dump(self, **kwargs): + dump = super().model_dump(**kwargs) + dump["completion_model"] = self._completion_model.model_dump(**kwargs) + return dump + + @classmethod + def model_validate(cls, obj: dict): + if "completion_model" in obj and obj["completion_model"]: + obj["completion_model"] = CompletionModel.model_validate( + obj["completion_model"] + ) + + return cls(**obj) + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="Add error handling to the process_payment method in the PaymentProcessor class", + action=RequestCodeChangeArgs( + scratch_pad="We need to add try-catch blocks to handle potential payment processing errors.", + file_path="payment/processor.py", + instructions="Add error handling to catch and handle payment processing exceptions", + pseudo_code="""try: + result = self._process_transaction(payment_data) + return result +except PaymentError as e: + logger.error(f"Payment processing failed: {e}") + raise PaymentProcessingError(f"Failed to process payment: {e}")""", + change_type=ChangeType.modification, + start_line=45, + end_line=47, + ), + ), + FewShotExample.create( + user_input="Add import for the logging module", + action=RequestCodeChangeArgs( + scratch_pad="We need to add the logging import at the top of the file.", + file_path="utils/helper.py", + instructions="Add import for the logging module", + pseudo_code="import logging", + change_type=ChangeType.addition, + start_line=1, + end_line=1, + ), + ), + ] diff --git a/moatless/actions/code_modification_mixin.py b/moatless/actions/code_modification_mixin.py new file mode 100644 index 00000000..67a6ec7e --- /dev/null +++ b/moatless/actions/code_modification_mixin.py @@ -0,0 +1,110 @@ +import logging +from pathlib import Path +from typing import Optional, Tuple + +from pydantic import PrivateAttr + +from 
moatless.actions.model import Observation +from moatless.actions.run_tests import RunTests, RunTestsArgs +from moatless.file_context import FileContext +from moatless.index import CodeIndex +from moatless.repository.repository import Repository +from moatless.runtime.runtime import RuntimeEnvironment + +logger = logging.getLogger(__name__) + + +class CodeModificationMixin: + """ + A mixin that provides common functionality for actions that modify code files. + This includes path normalization, file validation, test running, and observation handling. + """ + + _runtime: RuntimeEnvironment | None = PrivateAttr(default=None) + _code_index: CodeIndex | None = PrivateAttr(default=None) + _repository: Repository | None = PrivateAttr(default=None) + + def normalize_path(self, file_path: str) -> str: + """Normalize file path by removing /repo and leading /""" + if file_path.startswith("/repo"): + file_path = file_path[5:] + if file_path.startswith("/"): + file_path = file_path[1:] + return file_path + + def validate_file_access( + self, file_path: str, file_context: FileContext, allow_missing: bool = False + ) -> Tuple[Optional[Path], Optional[Observation]]: + """ + Validate file access and return either a valid Path object or an error Observation. + + Args: + file_path: The path to validate + file_context: The file context + allow_missing: Whether to allow missing files (for file creation) + + Returns: + Tuple of (Path object if valid, Error observation if invalid) + """ + path = Path(file_path) + + if not allow_missing and not file_context.file_exists(str(path)): + return None, Observation( + message=f"File {path} not found.", + properties={"fail_reason": "file_not_found"}, + ) + + if allow_missing and file_context.file_exists(str(path)): + return None, Observation( + message=f"File already exists at: {path}. 
Cannot overwrite existing file.", + properties={"fail_reason": "file_exists"}, + ) + + if not allow_missing: + context_file = file_context.get_context_file(str(path)) + if not context_file: + return None, Observation( + message=f"Could not get context for file: {path}", + properties={"fail_reason": "context_error"}, + ) + + return path, None + + def run_tests_and_update_observation( + self, + observation: Observation, + file_path: str, + scratch_pad: str, + file_context: FileContext, + ) -> Observation: + """Run tests and update the observation with test results""" + if not observation.properties or not observation.properties.get("diff"): + return observation + + if not self._runtime: + return observation + + run_tests = RunTests( + repository=self._repository, + runtime=self._runtime, + code_index=self._code_index, + ) + + test_observation = run_tests.execute( + RunTestsArgs( + scratch_pad=scratch_pad, + test_files=[file_path], + ), + file_context, + ) + + observation.properties.update(test_observation.properties) + observation.message += "\n\n" + test_observation.message + + return observation + + def format_snippet_with_lines(self, snippet: str, start_line: int) -> str: + """Format a code snippet with line numbers""" + return "\n".join( + f"{i + start_line:6}\t{line}" for i, line in enumerate(snippet.split("\n")) + ) diff --git a/moatless/actions/create_file.py b/moatless/actions/create_file.py new file mode 100644 index 00000000..4c79b1ce --- /dev/null +++ b/moatless/actions/create_file.py @@ -0,0 +1,161 @@ +import logging +from pathlib import Path +from typing import List + +from pydantic import Field + +from moatless.actions.action import Action +from moatless.actions.code_modification_mixin import CodeModificationMixin +from moatless.actions.model import ActionArguments, Observation, FewShotExample +from moatless.actions.run_tests import RunTests, RunTestsArgs +from moatless.file_context import FileContext +from moatless.index import CodeIndex +from moatless.repository.file import do_diff +from moatless.repository.repository import Repository +from moatless.runtime.runtime import RuntimeEnvironment + +logger = logging.getLogger(__name__) + + +class CreateFileArgs(ActionArguments): + """ + Create a new file with specified content. + + Notes: + * Cannot be used if the specified path already exists + * Will create parent directories if they don't exist + * File content should include proper indentation and formatting + """ + + path: str = Field(..., description="Path where the new file should be created") + file_text: str = Field(..., description="Complete content to write to the new file") + + class Config: + title = "CreateFile" + + +class CreateFile(Action, CodeModificationMixin): + """ + Action to create a new file with specified content. 
+ """ + + args_schema = CreateFileArgs + + def __init__( + self, + runtime: RuntimeEnvironment | None = None, + code_index: CodeIndex | None = None, + repository: Repository | None = None, + **data, + ): + super().__init__(**data) + # Initialize mixin attributes directly + object.__setattr__(self, "_runtime", runtime) + object.__setattr__(self, "_code_index", code_index) + object.__setattr__(self, "_repository", repository) + + def execute(self, args: CreateFileArgs, file_context: FileContext) -> Observation: + if args.path.startswith("/repo"): + args.path = args.path[5:] + if args.path.startswith("/"): + args.path = args.path[1:] + + path = Path(args.path) + + if file_context.file_exists(str(path)): + return Observation( + message=f"File already exists at: {path}. Cannot overwrite files using create command.", + properties={"fail_reason": "file_exists"}, + ) + + context_file = file_context.add_file(str(path)) + context_file.apply_changes(args.file_text) + + diff = do_diff(str(path), "", args.file_text) + + observation = Observation( + message=f"File created successfully at: {path}", + properties={"diff": diff, "success": True}, + ) + + if not self._runtime: + return observation + + run_tests = RunTests( + repository=self._repository, + runtime=self._runtime, + code_index=self._code_index, + ) + test_observation = run_tests.execute( + RunTestsArgs( + scratch_pad=args.scratch_pad, + test_files=[args.path], + ), + file_context, + ) + + observation.properties.update(test_observation.properties) + observation.message += "\n\n" + test_observation.message + + return observation + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="Create a new Python file for handling user authentication", + action=CreateFileArgs( + scratch_pad="Creating a new authentication module with basic user authentication functionality", + path="auth/user_auth.py", + file_text="""import logging +from typing import Optional + +logger = logging.getLogger(__name__) + +class UserAuth: + def __init__(self): + self._users = {} + + def authenticate(self, username: str, password: str) -> bool: + if username not in self._users: + logger.warning(f"Authentication failed: User {username} not found") + return False + + return self._users[username] == password + + def register(self, username: str, password: str) -> bool: + if username in self._users: + logger.error(f"Registration failed: User {username} already exists") + return False + + self._users[username] = password + logger.info(f"User {username} registered successfully") + return True""", + ), + ), + FewShotExample.create( + user_input="Create a new configuration file", + action=CreateFileArgs( + scratch_pad="Creating a configuration file with basic settings", + path="config/settings.py", + file_text="""from pathlib import Path + +BASE_DIR = Path(__file__).resolve().parent.parent + +DEBUG = True + +DATABASE = { + 'host': 'localhost', + 'port': 5432, + 'name': 'myapp_db', + 'user': 'admin' +} + +LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'level': 'INFO' +}""", + ), + ), + ] diff --git a/moatless/actions/edit.py b/moatless/actions/edit.py new file mode 100644 index 00000000..9282ab46 --- /dev/null +++ b/moatless/actions/edit.py @@ -0,0 +1,514 @@ +import logging +from pathlib import Path +from typing import Literal, Optional, List + +from pydantic import Field, PrivateAttr + +from moatless.actions import RunTests +from moatless.actions.action import Action +from moatless.actions.model 
import ActionArguments, Observation, RetryException +from moatless.actions.run_tests import RunTestsArgs +from moatless.actions.string_replace import StringReplace, StringReplaceArgs +from moatless.completion.model import ToolCall +from moatless.file_context import FileContext +from moatless.index import CodeIndex +from moatless.repository.file import do_diff +from moatless.repository.repository import Repository +from moatless.runtime.runtime import RuntimeEnvironment +from moatless.utils.tokenizer import count_tokens + +logger = logging.getLogger(__name__) + +Command = Literal[ + "view", + "create", + "str_replace", + "insert", + "undo_edit", +] + +SNIPPET_LINES: int = 4 + + +class EditActionArguments(ActionArguments): + """ + An filesystem editor tool that allows the agent to view, create, and edit files. + """ + + command: Command = Field(..., description="The edit command to execute") + path: str = Field(..., description="The file path to edit") + file_text: Optional[str] = Field( + None, description="The text content for file creation" + ) + view_range: Optional[List[int]] = Field( + None, description="Range of lines to view [start, end]" + ) + old_str: Optional[str] = Field(None, description="String to replace") + new_str: Optional[str] = Field(None, description="Replacement string") + insert_line: Optional[int] = Field(None, description="Line number for insertion") + + class Config: + title = "str_replace_editor" + + def to_tool_call(self) -> ToolCall: + return ToolCall( + name=self.name, type="text_editor_20241022", input=self.model_dump() + ) + + +class ClaudeEditTool(Action): + """ + An filesystem editor tool that allows the agent to view, create, and edit files. + The tool parameters are defined by Anthropic and are not editable. + """ + + args_schema = EditActionArguments + + max_tokens_to_view: int = Field( + 2000, description="Max tokens to view in one command" + ) + + _runtime: RuntimeEnvironment | None = PrivateAttr(None) + _code_index: CodeIndex | None = PrivateAttr(None) + _repository: Repository | None = PrivateAttr(None) + + def __init__( + self, + runtime: RuntimeEnvironment | None = None, + code_index: CodeIndex | None = None, + repository: Repository | None = None, + **data, + ): + super().__init__(**data) + self._runtime = runtime + self._code_index = code_index + self._repository = repository + + def execute( + self, args: EditActionArguments, file_context: FileContext + ) -> Observation: + # Claude tends to add /repo in the start of the file path. + # TODO: Maybe we should add /repo as default on all paths? + if args.path.startswith("/repo"): + args.path = args.path[5:] + + # Remove leading `/` if present + # TODO: Solve by adding /repo to all paths? 
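As a rough standalone sketch (not part of the diff), the normalization performed here and in `CodeModificationMixin.normalize_path` amounts to stripping a leading `/repo` segment and then any leading slash, leaving a repo-relative path:

```python
# Standalone sketch of the path normalization applied by the editor actions.
# The sample paths are illustrative; only the two prefix checks come from the patch.
def normalize_path(file_path: str) -> str:
    if file_path.startswith("/repo"):
        file_path = file_path[5:]
    if file_path.startswith("/"):
        file_path = file_path[1:]
    return file_path


assert normalize_path("/repo/moatless/loop.py") == "moatless/loop.py"
assert normalize_path("/moatless/loop.py") == "moatless/loop.py"
assert normalize_path("moatless/loop.py") == "moatless/loop.py"
```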
+ if args.path.startswith("/"): + args.path = args.path[1:] + + path = Path(args.path) + + validation_error = self.validate_path(file_context, args.command, path) + if validation_error: + return Observation( + message=validation_error, + properties={"fail_reason": "invalid_path"}, + expect_correction=True, + ) + + if args.command == "view": + return self._view(file_context, path, args) + elif args.command == "create": + if not args.file_text: + raise RetryException( + message="Parameter `file_text` is required for command: create", + action_args=args, + ) + observation = self._create(file_context, path, args.file_text) + elif args.command == "str_replace": + if not args.old_str: + raise RetryException( + message="Parameter `old_str` is required for command: str_replace", + action_args=args, + ) + str_replace = StringReplace( + runtime=self._runtime, + code_index=self._code_index, + repository=self._repository, + ) + return str_replace.execute( + StringReplaceArgs( + path=args.path, + old_str=args.old_str, + new_str=args.new_str or "", + scratch_pad=args.scratch_pad, + ), + file_context, + ) + elif args.command == "insert": + if args.insert_line is None: + raise RetryException( + message="Parameter `insert_line` is required for command: insert", + action_args=args, + ) + if args.new_str is None: + raise RetryException( + message="Parameter `new_str` is required for command: insert", + action_args=args, + ) + observation = self._insert( + file_context, path, args.insert_line, args.new_str + ) + else: + raise RetryException( + message=f"Unknown command: {args.command}", + action_args=args, + ) + + if not observation.properties or not observation.properties.get("diff"): + return observation + + if not self._runtime: + return observation + + run_tests = RunTests( + repository=self._repository, + runtime=self._runtime, + code_index=self._code_index, + ) + test_observation = run_tests.execute( + RunTestsArgs( + scratch_pad=args.scratch_pad, + test_files=[args.path], + ), + file_context, + ) + + observation.properties.update(test_observation.properties) + observation.message += "\n\n" + test_observation.message + + return observation + + def validate_path( + self, file_context: FileContext, command: str, path: Path + ) -> str | None: + """ + Check that the path/command combination is valid. + """ + # TODO: Check if its an absolute path? + # if not path.is_absolute(): + # suggested_path = Path("") / path + # return ( + # f"The path {path} is not an absolute path, it should start with `/`. Maybe you meant {suggested_path}?" + # ) + + # Check if path exists + if not file_context.file_exists(str(path)) and command != "create": + return f"The path {path} does not exist. Please provide a valid path." + + if file_context.file_exists(str(path)) and command == "create": + return f"File already exists at: {path}. Cannot overwrite files using command `create`." 
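Taken together, the validation above and the command dispatch in `execute` let the tool be driven as in the following rough usage sketch; `repository` and `file_context` are assumed to be an already-initialized `Repository` and `FileContext`, and the path and strings are made up:

```python
# Hypothetical usage sketch of the Anthropic-style editor tool with a
# str_replace command, which delegates to the StringReplace action.
tool = ClaudeEditTool(repository=repository)

args = EditActionArguments(
    scratch_pad="Bump the retry limit",
    command="str_replace",
    path="utils/config.py",
    old_str="MAX_RETRIES = 3",
    new_str="MAX_RETRIES = 5",
)

observation = tool.execute(args, file_context)
print(observation.message)                      # edited snippet, or an error message
print(observation.properties.get("diff"))       # diff of the change, if one was made
```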
+ + # Check if the path points to a directory + if file_context._repo.is_directory(str(path)): + if command != "view": + return f"The path {path} is a directory and only the `view` command can be used on directories" + + return None + + def _view( + self, file_context: FileContext, path: Path, args: EditActionArguments + ) -> Observation: + context_file = file_context.get_context_file(str(path)) + if not context_file: + return Observation( + message=f"Could not get context for file: {path}", + properties={"fail_reason": "context_error"}, + ) + + file_content = context_file.content + init_line = 1 + file_lines = file_content.split("\n") + n_lines = len(file_lines) + + view_range = args.view_range + if view_range: + if len(view_range) != 2: + raise RetryException( + message="Invalid view_range. It should be a list of two integers.", + action_args=args, + ) + + init_line, final_line = view_range + + if init_line < 1 or init_line > n_lines: + raise RetryException( + message=f"Invalid view_range start line: {init_line}. Should be between 1 and {n_lines}", + action_args=args, + ) + + if final_line == -1: + file_content = "\n".join(file_lines[init_line - 1 :]) + else: + file_content = "\n".join(file_lines[init_line - 1 : final_line]) + else: + final_line = n_lines + + tokens = count_tokens(file_content) + if tokens > self.max_tokens_to_view: + view_context = FileContext(self._repository) + view_context.add_file(str(path), show_all_spans=True) + + file_content = view_context.create_prompt( + show_span_ids=True, + show_outcommented_code=True, + only_signatures=True, + show_line_numbers=True, + ) + + raise RetryException( + message=f"File {path} is too large ({tokens} tokens) to view in its entirety. Maximum allowed is {self.max_tokens_to_view} tokens. " + f"Please specify a line range using view_range or spans with ViewCode to view specific parts of the file.\n" + f"Here's a structure of the file {file_content}", + action_args=args, + ) + + properties = {} + added_spans = file_context.add_line_span_to_context( + str(path), init_line, final_line + ) + if not added_spans: + properties["flag"] = "no_new_spans" + + message = self._make_output(file_content, f"{path}", init_line) + + return Observation(message=message, properties=properties) + + def _create( + self, file_context: FileContext, path: Path, file_text: str + ) -> Observation: + if path.exists(): + return Observation( + message=f"File already exists at: {path}", + properties={"fail_reason": "file_exists"}, + ) + + context_file = file_context.add_file(str(path)) + context_file.apply_changes(file_text) + + diff = do_diff(str(path), "", file_text) + + return Observation( + message=f"File created successfully at: {path}", + properties={"diff": diff, "success": True}, + ) + + def _str_replace( + self, file_context: FileContext, path: Path, old_str: str, new_str: str + ) -> Observation: + SNIPPET_LINES = 4 + + context_file = file_context.get_context_file(str(path)) + if not context_file: + return Observation( + message=f"Could not get context for file: {path}", + properties={"fail_reason": "context_error"}, + ) + + file_content = context_file.content.expandtabs() + old_str = old_str.expandtabs() + new_str = new_str.expandtabs() + + if old_str == new_str: + return Observation( + message="The replacement string is the same as the original string. 
No changes were made.", + properties={"fail_reason": "no_changes"}, + ) + + occurrences = file_content.count(old_str) + if occurrences == 0: + new_str_occurrences = file_content.count(new_str) + if new_str_occurrences > 0: + return Observation( + message=f"New string '{new_str}' already exists in {path}. No changes were made.", + properties={"fail_reason": "string_already_exists"}, + ) + + return Observation( + message=f"String '{old_str}' not found in {path}", + properties={ + "fail_reason": "string_not_found", + "file_content": file_content, + }, + expect_correction=True, + ) + elif occurrences > 1: + file_str = file_content + lines = [] + pos = 0 + while True: + pos = file_str.find(old_str, pos) + if pos == -1: + break + # Count newlines before this occurrence to get line number + start_line = file_str.count("\n", 0, pos) + 1 + lines.append(start_line) + pos += 1 + + return Observation( + message=f"Multiple occurrences of string found starting at lines {lines}", + properties={ + "fail_reason": "multiple_occurrences", + "file_content": file_content, + }, + expect_correction=True, + ) + + properties = { + "flags": [], + } + + # Find the line numbers where new_str appears + lines = file_content.split("\n") + file_str = "\n".join(lines) # Ensure consistent line endings + + start_pos = file_str.find(new_str) + if start_pos != -1: + # Count newlines before the match to get starting line number + start_line = file_str.count("\n", 0, start_pos) + 1 + # Count newlines in old_str to get ending line number + end_line = start_line + old_str.count("\n") + + properties["start_line"] = start_line + properties["end_line"] = end_line + + # Check if these lines are in context + if not context_file.lines_is_in_context(start_line, end_line): + properties["flags"].append("lines_not_in_context") + context_file.add_line_span(start_line, end_line) + + new_file_content = file_content.replace(old_str, new_str) + + diff = do_diff(str(path), file_content, new_file_content) + if not diff: + properties["fail_reason"] = "no_changes" + return Observation( + message=f"No changes made to the file {path}. Was the replacement string the same as the original string?", + properties=properties, + ) + + context_file.apply_changes(new_file_content) + + # Create a snippet of the edited section + replacement_line = file_content.split(old_str)[0].count("\n") + start_line = max(0, replacement_line - SNIPPET_LINES) + end_line = replacement_line + SNIPPET_LINES + new_str.count("\n") + snippet = "\n".join(new_file_content.split("\n")[start_line : end_line + 1]) + + # Prepare the success message + success_msg = f"The file {path} has been edited. " + success_msg += self._make_output( + snippet, f"a snippet of {path}", start_line + 1 + ) + success_msg += "Review the changes and make sure they are as expected. Edit the file again if necessary." 
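The line-number bookkeeping above boils down to counting newlines before the match; a minimal, self-contained illustration (the sample file content is made up):

```python
# Minimal illustration of the line-number arithmetic used above: the start line
# is the number of newlines before the match plus one, and the end line adds the
# newlines contained in the replaced string.
file_content = "import os\n\nMAX_RETRIES = 3\nTIMEOUT = 30\n"
old_str = "MAX_RETRIES = 3\nTIMEOUT = 30"

pos = file_content.find(old_str)
start_line = file_content.count("\n", 0, pos) + 1   # -> 3
end_line = start_line + old_str.count("\n")         # -> 4

assert (start_line, end_line) == (3, 4)
```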
+ + properties["success"] = True + properties["diff"] = diff + + return Observation( + message=success_msg, + properties=properties, + ) + + def _insert( + self, file_context: FileContext, path: Path, insert_line: int, new_str: str + ) -> Observation: + context_file = file_context.get_context_file(str(path)) + if not context_file: + return Observation( + message=f"Could not get context for file: {path}", + properties={"fail_reason": "context_error"}, + ) + + # Validate file exists and is not a directory + if not file_context.file_exists(str(path)): + return Observation( + message=f"File {path} not found.", + properties={"fail_reason": "file_not_found"}, + ) + file_text = context_file.content.expandtabs() + new_str = new_str.expandtabs() + file_text_lines = file_text.split("\n") + n_lines_file = len(file_text_lines) + + if insert_line < 0 or insert_line > len(file_text_lines): + return Observation( + message=f"Invalid `insert_line` parameter: {insert_line}. It should be within the range of lines of the file: {[0, n_lines_file]}", + properties={"fail_reason": "invalid_line_number"}, + expect_correction=True, + ) + + new_str_lines = new_str.split("\n") + new_file_text_lines = ( + file_text_lines[:insert_line] + + new_str_lines + + file_text_lines[insert_line:] + ) + snippet_lines = ( + file_text_lines[max(0, insert_line - SNIPPET_LINES) : insert_line] + + new_str_lines + + file_text_lines[insert_line : insert_line + SNIPPET_LINES] + ) + + new_file_text = "\n".join(new_file_text_lines) + snippet = "\n".join(snippet_lines) + + diff = do_diff(str(path), file_text, new_file_text) + + context_file.apply_changes(new_file_text) + + success_msg = f"The file {path} has been edited. " + success_msg += self._make_output( + snippet, + "a snippet of the edited file", + max(1, insert_line - SNIPPET_LINES + 1), + ) + success_msg += "Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). Edit the file again if necessary." + + return Observation( + message=success_msg, + properties={"diff": diff, "success": True}, + ) + + def _make_output( + self, + file_content: str, + file_descriptor: str, + init_line: int = 1, + expand_tabs: bool = True, + ): + """Generate output for the CLI based on the content of a file.""" + file_content = maybe_truncate(file_content) + if expand_tabs: + file_content = file_content.expandtabs() + file_content = "\n".join( + [ + f"{i + init_line:6}\t{line}" + for i, line in enumerate(file_content.split("\n")) + ] + ) + return ( + f"Here's the result of running `cat -n` on {file_descriptor}:\n" + + file_content + + "\n" + ) + + def span_id_list(self, span_ids: set[str]) -> str: + list_str = "" + for span_id in span_ids: + list_str += f" * {span_id}\n" + return list_str + + +TRUNCATED_MESSAGE: str = "To save on context only part of this file has been shown to you. You should retry this tool after you have searched inside the file with `grep -n` in order to find the line numbers of what you are looking for." 
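Both `_make_output` above and `CodeModificationMixin.format_snippet_with_lines` render snippets in the same `cat -n` style; a small standalone sketch of that formatting (the snippet text is invented):

```python
# Self-contained sketch of the `cat -n`-style rendering used by _make_output and
# format_snippet_with_lines: each line gets a 6-wide, right-aligned line number
# followed by a tab.
def format_with_line_numbers(snippet: str, start_line: int = 1) -> str:
    return "\n".join(
        f"{i + start_line:6}\t{line}" for i, line in enumerate(snippet.split("\n"))
    )


print(format_with_line_numbers("def add(a, b):\n    return a + b", start_line=10))
#     10	def add(a, b):
#     11	    return a + b
```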
+MAX_RESPONSE_LEN: int = 16000 + + +def maybe_truncate(content: str, truncate_after: int | None = MAX_RESPONSE_LEN): + """Truncate content and append a notice if content exceeds the specified length.""" + return ( + content + if not truncate_after or len(content) <= truncate_after + else content[:truncate_after] + TRUNCATED_MESSAGE + ) diff --git a/moatless/actions/find_class.py b/moatless/actions/find_class.py new file mode 100644 index 00000000..f336f9ce --- /dev/null +++ b/moatless/actions/find_class.py @@ -0,0 +1,95 @@ +import logging +from typing import List, Type, ClassVar + +from pydantic import Field, model_validator + +from moatless.actions.model import ActionArguments, FewShotExample +from moatless.actions.search_base import SearchBaseAction, SearchBaseArgs +from moatless.index.types import SearchCodeResponse + +logger = logging.getLogger(__name__) + + +class FindClassArgs(SearchBaseArgs): + """Use this when you know the exact name of a class you want to find. + + Perfect for: + - Finding class implementations: class_name="UserRepository" + - Locating test classes: class_name="TestUserAuthentication" + - Finding base classes: class_name="BaseController" + - Finding classes in specific modules: class_name="Config", file_pattern="src/config/*.py" + """ + + class_name: str = Field( + ..., description="Specific class name to include in the search." + ) + + @model_validator(mode="after") + def validate_names(self) -> "FindClassArgs": + if not self.class_name.strip(): + raise ValueError("class_name cannot be empty") + return self + + class Config: + title = "FindClass" + + +class FindClass(SearchBaseAction): + args_schema: ClassVar[Type[ActionArguments]] = FindClassArgs + + def to_prompt(self): + prompt = f"Searching for class: {self.args.class_name}" + if self.args.file_pattern: + prompt += f" in files matching the pattern: {self.args.file_pattern}" + return prompt + + def _search(self, args: FindClassArgs) -> SearchCodeResponse: + logger.info( + f"{self.name}: {args.class_name} (file_pattern: {args.file_pattern})" + ) + return self._code_index.find_class( + args.class_name, file_pattern=args.file_pattern + ) + + def _select_span_instructions(self, search_result: SearchCodeResponse) -> str: + return ( + f"Here's the class structure." 
+ f"Use the function ViewCode and specify the SpanIDs of the relevant functions to view them.\n" + ) + + def _search_for_alternative_suggestion( + self, args: FindClassArgs + ) -> SearchCodeResponse: + if args.file_pattern: + return self._code_index.find_class(args.class_name, file_pattern=None) + return SearchCodeResponse() + + @classmethod + def get_evaluation_criteria(cls, trajectory_length) -> List[str]: + criteria = super().get_evaluation_criteria(trajectory_length) + criteria.extend( + [ + "Identifier Correctness: Verify that the class name is accurate.", + ] + ) + return criteria + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="I need to see the implementation of the DatabaseManager class to understand how it handles transactions", + action=FindClassArgs( + scratch_pad="To examine how the DatabaseManager class handles transactions, we need to locate its implementation in the codebase.", + class_name="DatabaseManager", + ), + ), + FewShotExample.create( + user_input="Show me the UserAuthentication class in the auth module", + action=FindClassArgs( + scratch_pad="Looking for the UserAuthentication class specifically in the authentication module.", + class_name="UserAuthentication", + file_pattern="auth/*.py", + ), + ), + ] diff --git a/moatless/actions/find_code_snippet.py b/moatless/actions/find_code_snippet.py new file mode 100644 index 00000000..ebbc5218 --- /dev/null +++ b/moatless/actions/find_code_snippet.py @@ -0,0 +1,98 @@ +import logging +from typing import List, Optional, Type, ClassVar + +from pydantic import Field, model_validator + +from moatless.actions.model import ActionArguments, FewShotExample +from moatless.actions.search_base import SearchBaseAction, SearchBaseArgs +from moatless.file_context import FileContext + +logger = logging.getLogger(__name__) + + +class FindCodeSnippetArgs(SearchBaseArgs): + """Use this when you know the exact code you want to find. + It will run the command: grep -n -r "code_snippet" "file_pattern" + + Perfect for: + - Finding specific constant definitions: code_snippet="MAX_RETRIES = 3" + - Finding decorator usage: code_snippet="@retry(max_attempts=3)" + - Finding specific imports: code_snippet="from datetime import datetime" + - Finding configuration patterns: code_snippet="DEBUG = os.getenv('DEBUG', False)" + + Note: You must know the exact code snippet. Use SemanticSearch if you only know + what the code does but not its exact implementation. + """ + + code_snippet: str = Field(..., description="The exact code snippet to find.") + file_pattern: Optional[str] = Field( + default=None, + description="A glob pattern to filter search results to specific file types or directories. 
", + ) + + class Config: + title = "FindCodeSnippet" + + @model_validator(mode="after") + def validate_snippet(self) -> "FindCodeSnippetArgs": + if not self.code_snippet.strip(): + raise ValueError("code_snippet cannot be empty") + return self + + def to_prompt(self): + prompt = f"Searching for code snippet: {self.code_snippet}" + if self.file_pattern: + prompt += f" in files matching the pattern: {self.file_pattern}" + return prompt + + +class FindCodeSnippet(SearchBaseAction): + args_schema: ClassVar[Type[ActionArguments]] = FindCodeSnippetArgs + + def _search_for_context(self, args: FindCodeSnippetArgs) -> FileContext: + logger.info( + f"{self.name}: {args.code_snippet} (file_pattern: {args.file_pattern})" + ) + + matches = self._repository.find_exact_matches( + search_text=args.code_snippet, file_pattern=args.file_pattern + ) + + search_result_context = FileContext(repo=self._repository) + for file_path, start_line in matches: + num_lines = len(args.code_snippet.splitlines()) + end_line = start_line + num_lines - 1 + + search_result_context.add_line_span_to_context( + file_path, start_line, end_line + ) + + return search_result_context + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="I need to understand how the User class is structured in our authentication system. Let me find its definition.", + action=FindCodeSnippetArgs( + scratch_pad="To find the User class definition, I'll search for the exact class declaration line 'class User(BaseModel):'", + code_snippet="class User(BaseModel):", + ), + ), + FewShotExample.create( + user_input="The system seems to use a default timeout value. I should check where DEFAULT_TIMEOUT is defined in the configuration.", + action=FindCodeSnippetArgs( + scratch_pad="To find the timeout configuration, I'll search for the exact variable declaration 'DEFAULT_TIMEOUT =' in config files", + code_snippet="DEFAULT_TIMEOUT =", + file_pattern="**/config/*.py", + ), + ), + FewShotExample.create( + user_input="To understand how request processing works, I need to examine the _handlers dictionary in the processor service.", + action=FindCodeSnippetArgs( + scratch_pad="To find the handlers mapping, I'll search for the exact dictionary declaration '_handlers =' in the processor service", + code_snippet="_handlers =", + file_pattern="services/processor.py", + ), + ), + ] diff --git a/moatless/actions/find_function.py b/moatless/actions/find_function.py new file mode 100644 index 00000000..d6f3c95d --- /dev/null +++ b/moatless/actions/find_function.py @@ -0,0 +1,133 @@ +from typing import Optional, List, Type, ClassVar + +from pydantic import Field, model_validator + +from moatless.actions.model import ActionArguments, FewShotExample +from moatless.actions.search_base import SearchBaseAction, SearchBaseArgs, logger +from moatless.codeblocks import CodeBlockType +from moatless.index.types import SearchCodeResponse, SearchCodeHit, SpanHit + + +class FindFunctionArgs(SearchBaseArgs): + """Use this when you know the exact name of a function or method you want to find. + + Perfect for: + - Finding test cases: function_name="test_user_login" + - Locating specific implementations: function_name="process_payment" + - Finding all methods with a name: function_name="validate" + - Finding a specific class method: function_name="save", class_name="UserRepository" + """ + + function_name: str = Field( + ..., + description="The exact name of the function or method you want to find. 
Must match the function definition in code.", + ) + class_name: Optional[str] = Field( + default=None, + description="Optional class name if searching for a specific class method. Leave empty for standalone functions.", + ) + + @model_validator(mode="after") + def validate_names(self) -> "FindFunctionArgs": + if not self.function_name.strip(): + raise ValueError("function_name cannot be empty") + if self.class_name is not None and not self.class_name.strip(): + raise ValueError("class_name must be None or non-empty") + return self + + class Config: + title = "FindFunction" + + def to_prompt(self): + prompt = f"Searching for function: {self.function_name}" + if self.class_name: + prompt += f" in class: {self.class_name}" + if self.file_pattern: + prompt += f" in files matching the pattern: {self.file_pattern}" + return prompt + + +class FindFunction(SearchBaseAction): + args_schema: ClassVar[Type[ActionArguments]] = FindFunctionArgs + + def _search(self, args: FindFunctionArgs) -> SearchCodeResponse: + logger.info( + f"{self.name}: {args.function_name} (class_name: {args.class_name}, file_pattern: {args.file_pattern})" + ) + return self._code_index.find_function( + args.function_name, + class_name=args.class_name, + file_pattern=args.file_pattern, + ) + + def _search_for_alternative_suggestion( + self, args: FindFunctionArgs + ) -> SearchCodeResponse: + """Return methods in the same class or other methods in same file with the method name the method in class is not found.""" + + if args.class_name and args.file_pattern: + file = self._repository.get_file(args.file_pattern) + + span_ids = [] + if file and file.module: + class_block = file.module.find_by_identifier(args.class_name) + if class_block and class_block.type == CodeBlockType.CLASS: + function_blocks = class_block.find_blocks_with_type( + CodeBlockType.FUNCTION + ) + for function_block in function_blocks: + span_ids.append(function_block.belongs_to_span.span_id) + + function_blocks = file.module.find_blocks_with_identifier( + args.function_name + ) + for function_block in function_blocks: + span_ids.append(function_block.belongs_to_span.span_id) + + if span_ids: + return SearchCodeResponse( + hits=[ + SearchCodeHit( + file_path=args.file_pattern, + spans=[SpanHit(span_id=span_id) for span_id in span_ids], + ) + ] + ) + + return self._code_index.find_class( + args.class_name, file_pattern=args.file_pattern + ) + + return SearchCodeResponse() + + @classmethod + def get_evaluation_criteria(cls, trajectory_length) -> List[str]: + criteria = super().get_evaluation_criteria(trajectory_length) + criteria.extend( + [ + "Function Identifier Accuracy: Ensure that the function name is correctly specified.", + "Class Name Appropriateness: Verify that the class names, if any, are appropriate.", + ] + ) + return criteria + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="Find the calculate_interest function in our financial module to review its logic", + action=FindFunctionArgs( + scratch_pad="To review the logic of the calculate_interest function, we need to locate its implementation in the financial module.", + function_name="calculate_interest", + file_pattern="financial/**/*.py", + ), + ), + FewShotExample.create( + user_input="Show me the validate_token method in the JWTAuthenticator class", + action=FindFunctionArgs( + scratch_pad="Looking for the validate_token method specifically within the JWTAuthenticator class to examine the token validation logic.", + 
function_name="validate_token", + class_name="JWTAuthenticator", + ), + ), + ] diff --git a/moatless/actions/finish.py b/moatless/actions/finish.py new file mode 100644 index 00000000..3d3c6837 --- /dev/null +++ b/moatless/actions/finish.py @@ -0,0 +1,77 @@ +from typing import ClassVar, List + +from litellm import Type +from pydantic import Field + +from moatless.actions.action import Action +from moatless.actions.model import ( + ActionArguments, + Observation, + FewShotExample, +) +from moatless.file_context import FileContext + + +class FinishArgs(ActionArguments): + """Indicate that the task is fully completed.""" + + scratch_pad: str = Field( + ..., description="Your reasoning about why the task is complete." + ) + finish_reason: str = Field(..., description="Explanation of completion.") + + class Config: + title = "Finish" + + def to_prompt(self): + return f"Finish with reason: {self.finish_reason}" + + def equals(self, other: "ActionArguments") -> bool: + return isinstance(other, FinishArgs) + + +class Finish(Action): + args_schema: ClassVar[Type[ActionArguments]] = FinishArgs + + def execute(self, args: FinishArgs, file_context: FileContext | None = None): + return Observation(message=args.finish_reason, terminal=True) + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="""Applied the change to tests/auth_tests/test_validators.py + +```diff +--- tests/auth_tests/test_validators.py ++++ tests/auth_tests/test_validators.py +@@ -238,6 +238,7 @@ + "o'connell", "عبد ال", + "zerowidth\u200bspace", "nonbreaking\u00a0space", + "en\u2013dash", ++ "\njoe", "\nRené", "\nUSERNAME", "أحمد\n", + ] + v = validators.UnicodeUsernameValidator() + for valid in valid_usernames: +@@ -250,7 +251,7 @@ + + def test_ascii_validator(self): + valid_usernames = ['glenn', 'GLEnN', 'jean-marc'] +- invalid_usernames = ["o'connell", 'Éric', 'jean marc', "أحمد"] ++ invalid_usernames = ["o'connell", 'Éric', 'jean marc', "أحمد", "\nglenn", "\nGLEnN", "\njean-marc", "USERNAME\n"] + v = validators.ASCIIUsernameValidator() + for valid in valid_usernames: + with self.subTest(valid=valid): + +Running 45 tests in the following files: + * tests/calculator/test_operations.py + * tests/calculator/test_parser.py + +45 passed. 0 failed. 0 errors. +""", + action=FinishArgs( + scratch_pad="The implementation has been completed in two steps: First, the username validators were updated to handle leading and trailing newlines correctly. Then, the test suite was enhanced to verify this functionality. The test additions cover both Unicode and ASCII username validators with comprehensive edge cases. 
All tests are now passing, confirming both the implementation and test coverage are correct.", + finish_reason="Username validators now properly handle leading and trailing newlines in usernames, with comprehensive test coverage verifying the functionality.", + ), + ) + ] diff --git a/moatless/actions/insert_line.py b/moatless/actions/insert_line.py new file mode 100644 index 00000000..ddbc2b53 --- /dev/null +++ b/moatless/actions/insert_line.py @@ -0,0 +1,194 @@ +import logging +from pathlib import Path +from typing import List + +from pydantic import Field + +from moatless.actions.action import Action +from moatless.actions.code_modification_mixin import CodeModificationMixin +from moatless.actions.model import ActionArguments, Observation, FewShotExample +from moatless.actions.run_tests import RunTests, RunTestsArgs +from moatless.file_context import FileContext +from moatless.index import CodeIndex +from moatless.repository.file import do_diff +from moatless.repository.repository import Repository +from moatless.runtime.runtime import RuntimeEnvironment + +logger = logging.getLogger(__name__) + +SNIPPET_LINES = 4 + + +class InsertLineArgs(ActionArguments): + """ + Insert text at a specific line number in a file. + + Notes: + * The text will be inserted AFTER the specified line number + * Line numbers start at 1 + * The insert_line must be within the valid range of lines in the file + * Proper indentation should be maintained in the inserted text + """ + + path: str = Field(..., description="Path to the file to edit") + insert_line: int = Field( + ..., + description="Line number after which to insert the new text (indexing starts at 1)", + ) + new_str: str = Field( + ..., description="Text content to insert at the specified line" + ) + + class Config: + title = "InsertLines" + + +class InsertLine(Action, CodeModificationMixin): + """ + Action to insert text at a specific line in a file. + """ + + args_schema = InsertLineArgs + + def __init__( + self, + runtime: RuntimeEnvironment | None = None, + code_index: CodeIndex | None = None, + repository: Repository | None = None, + **data, + ): + super().__init__(**data) + # Initialize mixin attributes directly + object.__setattr__(self, "_runtime", runtime) + object.__setattr__(self, "_code_index", code_index) + object.__setattr__(self, "_repository", repository) + + def execute(self, args: InsertLineArgs, file_context: FileContext) -> Observation: + if args.path.startswith("/repo"): + args.path = args.path[5:] + if args.path.startswith("/"): + args.path = args.path[1:] + + path = Path(args.path) + + if not file_context.file_exists(str(path)): + return Observation( + message=f"File {path} not found.", + properties={"fail_reason": "file_not_found"}, + ) + + context_file = file_context.get_context_file(str(path)) + if not context_file: + return Observation( + message=f"Could not get context for file: {path}", + properties={"fail_reason": "context_error"}, + ) + + file_text = context_file.content.expandtabs() + new_str = args.new_str.expandtabs() + file_text_lines = file_text.split("\n") + n_lines_file = len(file_text_lines) + + if args.insert_line < 0 or args.insert_line > len(file_text_lines): + return Observation( + message=f"Invalid `insert_line` parameter: {args.insert_line}. 
It should be within the range of lines of the file: [0, {n_lines_file}]", + properties={"fail_reason": "invalid_line_number"}, + expect_correction=True, + ) + + new_str_lines = new_str.split("\n") + new_file_text_lines = ( + file_text_lines[: args.insert_line] + + new_str_lines + + file_text_lines[args.insert_line :] + ) + snippet_lines = ( + file_text_lines[max(0, args.insert_line - SNIPPET_LINES) : args.insert_line] + + new_str_lines + + file_text_lines[args.insert_line : args.insert_line + SNIPPET_LINES] + ) + + new_file_text = "\n".join(new_file_text_lines) + snippet = "\n".join(snippet_lines) + + diff = do_diff(str(path), file_text, new_file_text) + context_file.apply_changes(new_file_text) + + # Format the snippet with line numbers + snippet_with_lines = "\n".join( + f"{i + max(1, args.insert_line - SNIPPET_LINES + 1):6}\t{line}" + for i, line in enumerate(snippet.split("\n")) + ) + + success_msg = ( + f"The file {path} has been edited. Here's the result of running `cat -n` " + f"on a snippet of the edited file:\n{snippet_with_lines}\n" + "Review the changes and make sure they are as expected (correct indentation, no duplicate lines, etc). " + "Edit the file again if necessary." + ) + + observation = Observation( + message=success_msg, + properties={"diff": diff, "success": True}, + ) + + if not self._runtime: + return observation + + run_tests = RunTests( + repository=self._repository, + runtime=self._runtime, + code_index=self._code_index, + ) + test_observation = run_tests.execute( + RunTestsArgs( + scratch_pad=args.scratch_pad, + test_files=[args.path], + ), + file_context, + ) + + observation.properties.update(test_observation.properties) + observation.message += "\n\n" + test_observation.message + + return observation + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="Add a new import statement at the beginning of the file", + action=InsertLineArgs( + scratch_pad="Adding import for datetime module", + path="utils/time_helper.py", + insert_line=1, + new_str="from datetime import datetime, timezone", + ), + ), + FewShotExample.create( + user_input="Add a new method to the UserProfile class", + action=InsertLineArgs( + scratch_pad="Adding a method to update user preferences", + path="models/user.py", + insert_line=15, + new_str=""" def update_preferences(self, preferences: dict) -> None: + self._preferences.update(preferences) + self._last_updated = datetime.now(timezone.utc) + logger.info(f"Updated preferences for user {self.username}")""", + ), + ), + FewShotExample.create( + user_input="Add a new configuration option", + action=InsertLineArgs( + scratch_pad="Adding Redis configuration settings", + path="config/settings.py", + insert_line=25, + new_str="""REDIS_CONFIG = { + 'host': 'localhost', + 'port': 6379, + 'db': 0, + 'password': None +}""", + ), + ), + ] diff --git a/moatless/actions/model.py b/moatless/actions/model.py new file mode 100644 index 00000000..4d320149 --- /dev/null +++ b/moatless/actions/model.py @@ -0,0 +1,172 @@ +import importlib +import logging +import pkgutil +from abc import ABC +from typing import Dict, Type, Any, Optional + +from instructor.utils import classproperty +from pydantic import Field, BaseModel, model_validator + +from moatless.completion.model import ToolCall, Completion, StructuredOutput + +logger = logging.getLogger(__name__) + + +_action_args: Dict[str, Type["ActionArguments"]] = {} + + +class ActionArguments(StructuredOutput, ABC): + scratch_pad: str = 
Field(description="Your reasoning for the action.") + + class Config: + title = "Action" + + @classproperty + def name(cls): + return cls.Config.title if hasattr(cls.Config, "title") else cls.__name__ + + def to_tool_call(self) -> ToolCall: + return ToolCall(name=self.name, input=self.model_dump()) + + @classmethod + def from_tool_call(cls, tool_args: dict[str, Any], tool_name: str | None = None): + return cls(**tool_args) + + def equals(self, other: "ActionArguments") -> bool: + return self.model_dump(exclude={"scratch_pad"}) == other.model_dump( + exclude={"scratch_pad"} + ) + + def to_prompt(self): + prompt = f"Action: {self.name}\n" + prompt += "\n".join( + [f" {k}: {v}" for k, v in self.model_dump(exclude={"scratch_pad"}).items()] + ) + return prompt + + @model_validator(mode="before") + @classmethod + def fix_scratch_pad(cls, data: Any) -> Any: + """Allow scratch_pad to be null.""" + if isinstance(data, dict): + if not data.get("scratch_pad"): + data["scratch_pad"] = "" + + return data + + @model_validator(mode="before") + @classmethod + def fix_null_fields(cls, data: Any) -> Any: + """Allow scratch_pad to be null.""" + if isinstance(data, dict): + for key, value in data.items(): + if value == "null": + data[key] = None + + return data + + @classmethod + def get_action_args(cls, action_name: str) -> Type["ActionArguments"]: + """ + Dynamically import and return the appropriate ActionArguments class for the given action. + """ + if not _action_args: + cls._load_action_args() + + action_args = _action_args.get(action_name) + if action_args: + return action_args + + raise ValueError(f"Unknown action: {action_name}") + + @classmethod + def _load_action_args(cls): + actions_package = importlib.import_module("moatless.actions") + + for _, module_name, _ in pkgutil.iter_modules(actions_package.__path__): + full_module_name = f"moatless.actions.{module_name}" + module = importlib.import_module(full_module_name) + for name, obj in module.__dict__.items(): + if ( + isinstance(obj, type) + and issubclass(obj, ActionArguments) + and obj != ActionArguments + ): + _action_args[name] = obj + + @classmethod + def model_validate(cls, obj: Any) -> "ActionArguments": + if isinstance(obj, dict): + obj = obj.copy() + action_args_class_path = obj.pop("action_args_class", None) + if ( + action_args_class_path + == "moatless.actions.request_context.RequestMoreContextArgs" + ): + action_args_class_path = "moatless.actions.view_code.ViewCodeArgs" + + if action_args_class_path: + module_name, class_name = action_args_class_path.rsplit(".", 1) + module = importlib.import_module(module_name) + action_args_class = getattr(module, class_name) + return action_args_class.model_validate(obj) + return super().model_validate(obj) + + +class Observation(BaseModel): + message: str = Field( + description="The message returned to the agent, will be displayed in message history." 
+ ) + summary: Optional[str] = Field( + None, + description="Summary of the observation, will be displayed in summarised message history.", + ) + terminal: bool = Field( + False, description="Indicates if this action results in a terminal state" + ) + expect_correction: bool = Field( + False, + description="Indicates that a the action arguments was inccorect and we expect a correction", + ) + properties: Optional[Dict[str, Any]] = Field( + default_factory=dict, description="Additional properties" + ) + execution_completion: Optional[Completion] = Field( + None, description="Completion created when executing the action" + ) + + @classmethod + def create(cls, message: str, terminal: bool = False): + return cls(message=message, terminal=terminal) + + +class FewShotExample(BaseModel): + user_input: str = Field(..., description="The user's input/question") + action: ActionArguments = Field( + ..., description="The expected response as ActionArguments" + ) + + @classmethod + def create(cls, user_input: str, action: ActionArguments) -> "FewShotExample": + return cls(user_input=user_input, action=action) + + +class ActionError(ActionArguments): + """Error""" + + error: str = Field(..., description="Error.") + + class Config: + title = "Error" + + def to_prompt(self): + return f"Error: {self.error}" + + +class RetryException(Exception): + """Exception raised when an action needs to be retried with corrected arguments.""" + + def __init__(self, message: str, action_args: ActionArguments): + super().__init__(message) + self.message = message + self.action_args = action_args diff --git a/moatless/actions/reject.py b/moatless/actions/reject.py new file mode 100644 index 00000000..8790190f --- /dev/null +++ b/moatless/actions/reject.py @@ -0,0 +1,29 @@ +from typing import Type, ClassVar + +from pydantic import Field + +from moatless.actions.action import Action +from moatless.actions.model import ActionArguments, Observation +from moatless.file_context import FileContext + + +class RejectArgs(ActionArguments): + """Reject the task and explain why.""" + + rejection_reason: str = Field(..., description="Explanation for rejection.") + + class Config: + title = "Reject" + + def to_prompt(self): + return f"Reject with reason: {self.rejection_reason}" + + def equals(self, other: "ActionArguments") -> bool: + return isinstance(other, RejectArgs) + + +class Reject(Action): + args_schema: ClassVar[Type[ActionArguments]] = RejectArgs + + def execute(self, args: RejectArgs, file_context: FileContext | None = None): + return Observation(message=args.rejection_reason, terminal=True) diff --git a/moatless/actions/run_tests.py b/moatless/actions/run_tests.py new file mode 100644 index 00000000..f15615c9 --- /dev/null +++ b/moatless/actions/run_tests.py @@ -0,0 +1,222 @@ +import logging +from typing import List, Any + +from pydantic import Field, PrivateAttr + +from moatless.actions.action import Action +from moatless.actions.model import ( + ActionArguments, + FewShotExample, + Observation +) +from moatless.file_context import FileContext +from moatless.index.code_index import CodeIndex, is_test +from moatless.repository.repository import Repository +from moatless.runtime.runtime import RuntimeEnvironment, TestResult, TestStatus +from moatless.utils.tokenizer import count_tokens + +logger = logging.getLogger(__name__) + + +class RunTestsArgs(ActionArguments): + """ + Run the specified unit tests on the codebase. 
+ """ + + scratch_pad: str = Field(..., description="Your reasoning on what tests to run.") + test_files: List[str] = Field(..., description="The list of test files to run") + + class Config: + title = "RunTests" + + @property + def log_name(self): + return f"RunTests({', '.join(self.test_files)})" + + def to_prompt(self): + return f"Running tests for the following files:\n" + "\n".join( + f"* {file}" for file in self.test_files + ) + + +class RunTests(Action): + args_schema = RunTestsArgs + + max_output_tokens: int = Field( + 2000, + description="The maximum number of tokens in the test result output message", + ) + + _code_index: CodeIndex = PrivateAttr() + _repository: Repository = PrivateAttr() + _runtime: RuntimeEnvironment = PrivateAttr() + + def __init__( + self, + code_index: CodeIndex | None = None, + repository: Repository | None = None, + runtime: RuntimeEnvironment | None = None, + **data, + ): + super().__init__(**data) + self._repository = repository + self._code_index = code_index + self._runtime = runtime + + def execute( + self, args: RunTestsArgs, file_context: FileContext | None = None + ) -> Observation: + """ + Run all tests found in file context or provided in args. + """ + if file_context is None: + raise ValueError( + "File context must be provided to execute the run tests action." + ) + + test_files = [ + test_file + for test_file in args.test_files + if file_context.get_file(test_file) is not None and is_test(test_file) + ] + + if not test_files: + file_paths = args.test_files + if not file_paths: + file_paths = [file.file_path for file in file_context.files] + + for file_path in file_paths: + search_results = self._code_index.find_test_files( + file_path, query=file_path, max_results=2, max_spans=2 + ) + + for search_result in search_results: + test_files.append(search_result.file_path) + + for test_file in test_files: + if not file_context.has_file(test_file): + logger.info(f"Adding test file: {test_file} to context") + file_context.add_file(test_file, add_extra=False) + + test_files = [ + file.file_path for file in file_context.files if is_test(file.file_path) + ] + + logger.info(f"Running tests: {test_files}") + patch = file_context.generate_git_patch() + test_results = self._runtime.run_tests(patch, test_files) + failing_tests = [ + issue + for issue in test_results + if issue.status in [TestStatus.FAILED, TestStatus.ERROR] + ] + + tests_with_output = [ + issue for issue in failing_tests if issue.message and issue.file_path + ] + + if failing_tests: + logger.info( + f"{len(failing_tests)} out of {len(test_results)} tests failed. " + f"Include spans for {len(tests_with_output)} tests with output." + ) + + # Add failing tests to context. 
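The grouping that follows is a simple `setdefault`-based bucketing of failed span ids per file; a standalone sketch with toy records standing in for `TestResult` objects (file paths and span ids are made up):

```python
# Standalone sketch of the grouping performed below: collect the span ids of
# failing tests, keyed by the file they live in.
failing = [
    ("tests/auth/test_login.py", "test_login_rejects_newline"),
    ("tests/auth/test_login.py", "test_login_ok"),
    ("tests/auth/test_validators.py", "test_ascii_validator"),
]

failed_spans_by_file: dict[str, list[str]] = {}
for file_path, span_id in failing:
    failed_spans_by_file.setdefault(file_path, []).append(span_id)

assert failed_spans_by_file["tests/auth/test_login.py"] == [
    "test_login_rejects_newline",
    "test_login_ok",
]
```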
+ failed_test_spans_by_file_path: dict = {} + for test_result in tests_with_output: + if test_result.file_path: + failed_test_spans_by_file_path.setdefault( + test_result.file_path, [] + ).append(test_result.span_id) + + for test_file in test_files: + failed_span_ids = failed_test_spans_by_file_path.get(test_file) + if failed_span_ids: + test_context_file = file_context.get_file(test_file) + test_context_file.add_spans(failed_span_ids) + + return self.create_output(test_results, test_files) + + def create_output( + self, test_results: List[TestResult], test_files: List[str] + ) -> Observation: + if not test_results: + return Observation( + message="No tests were run", + properties={"test_results": [], "fail_reason": "no_tests"}, + ) + + failure_count = sum( + 1 for issue in test_results if issue.status == TestStatus.FAILED + ) + error_count = sum( + 1 for issue in test_results if issue.status == TestStatus.ERROR + ) + + passed_count = len(test_results) - failure_count - error_count + + test_result_strings = [] + token_count = 0 + + for i, test_result in enumerate(test_results): + if not test_result.message or test_result.status not in [ + TestStatus.FAILED, + TestStatus.ERROR, + ]: + continue + + attributes = "" + if test_result.file_path: + attributes += f"{test_result.file_path}" + + if test_result.span_id: + attributes += f" {test_result.span_id}" + + if test_result.line: + attributes += f", line: {test_result.line}" + + test_output = f"* {test_result.status.value} {attributes}>\n```\n{test_result.message}\n```\n" + test_output_tokens = count_tokens(test_output) + if token_count + test_output_tokens > self.max_output_tokens: + logger.warning( + f"Test output message exceeds max token limit ({self.max_output_tokens})." + ) + break + + token_count += test_output_tokens + + test_result_strings.append(test_output) + + response_msg = f"Running {len(test_results)} tests in the following files:" + for test_file in test_files: + response_msg += f"\n * {test_file}" + + if test_result_strings: + response_msg += "\n\n" + response_msg += "\n".join(test_result_strings) + + response_msg += ( + f"\n\n{passed_count} passed. {failure_count} failed. {error_count} errors." 
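+            # Illustrative example of the appended summary line (values are made up):
+            #     "\n\n4 passed. 1 failed. 0 errors."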
+ ) + + result_dicts = [result.model_dump() for result in test_results] + + return Observation( + message=response_msg, + properties={"test_results": result_dicts}, + ) + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="Run the tests for our authentication module to verify the recent changes to the login flow", + action=RunTestsArgs( + scratch_pad="We need to run the authentication tests to ensure the login flow changes haven't introduced any regressions.", + test_files=[ + "tests/auth/test_authentication.py", + "tests/auth/test_login.py", + ], + ), + ) + ] diff --git a/moatless/actions/search_base.py b/moatless/actions/search_base.py new file mode 100644 index 00000000..159e3824 --- /dev/null +++ b/moatless/actions/search_base.py @@ -0,0 +1,311 @@ +import logging +from abc import ABC +from typing import List, Optional, Type, Any, ClassVar, Tuple + +from pydantic import Field, PrivateAttr, BaseModel + +from moatless.actions.action import Action +from moatless.actions.model import ActionArguments, Observation +from moatless.completion import CompletionModel +from moatless.completion.model import UserMessage, AssistantMessage, Completion +from moatless.exceptions import CompletionRejectError +from moatless.file_context import FileContext +from moatless.index import CodeIndex +from moatless.index.types import SearchCodeResponse +from moatless.repository.repository import Repository + +logger = logging.getLogger(__name__) + +IDENTIFY_SYSTEM_PROMPT = """You are an autonomous AI assistant tasked with identifying relevant code in a codebase. Your goal is to select key code sections from the search results that are most relevant to the search request. + +The previous messages will contain: +1. A search request from an AI assistant +2. Search results containing various code sections with their line numbers + +# Your Task: + +1. Understand the Search Request: + * Analyze the previous search request to understand what code elements are being looked for + * Identify key elements such as functions, variables, classes, or patterns that are relevant + +2. Evaluate Search Results: + * Examine each code section in the search results for alignment with the search request + * Assess the relevance and importance of each code section + * Consider the complete context of code sections + +3. Respond with the Identify Action: + * Select and respond with the code sections that best match the search request + * Provide your analysis in the scratch_pad field + * List the relevant file paths with start and end line numbers in the identified_spans field +""" + + +class SearchBaseArgs(ActionArguments, ABC): + file_pattern: Optional[str] = Field( + default=None, + description="A glob pattern to filter search results to specific files or directories.", + ) + + +class IdentifiedSpans(BaseModel): + file_path: str = Field( + description="The file path where the relevant code is found." + ) + start_line: int = Field( + description="Starting line number of the relevant code section." + ) + end_line: int = Field( + description="Ending line number of the relevant code section." 
+ ) + + +class Identify(ActionArguments): + """Identify if the provided search result is relevant to the reported issue.""" + + scratch_pad: str = Field( + ..., + description="Your thoughts and analysis on the search results and how they relate to the reported issue.", + ) + + identified_spans: Optional[list[IdentifiedSpans]] = Field( + default=None, + description="Files and code sections in the search results identified as relevant to the reported issue.", + ) + + +class SearchBaseAction(Action): + args_schema: ClassVar[Type[ActionArguments]] = SearchBaseArgs + + max_search_tokens: int = Field( + 2000, + description="The maximum number of tokens allowed in the search results.", + ) + max_identify_tokens: int = Field( + 8000, + description="The maximum number of tokens allowed in the identified code sections.", + ) + max_hits: int = Field( + 10, + description="The maximum number of search hits to display.", + ) + completion_model: Optional[CompletionModel] = Field( + None, + description="The completion model used to identify relevant code sections in search results.", + ) + + _repository: Repository = PrivateAttr() + _code_index: CodeIndex = PrivateAttr() + + def __init__( + self, + repository: Repository | None = None, + code_index: CodeIndex | None = None, + completion_model: CompletionModel | None = None, + **data, + ): + super().__init__(completion_model=completion_model, **data) + self._repository = repository + self._code_index = code_index + + def execute( + self, args: SearchBaseArgs, file_context: FileContext | None = None + ) -> Observation: + if file_context is None: + raise ValueError( + "File context must be provided to execute the search action." + ) + + properties = {"search_hits": [], "search_tokens": 0} + + search_result_context = self._search_for_context(args) + + if search_result_context.is_empty(): + properties["fail_reason"] = "no_search_hits" + return Observation(message="No search results found", properties=properties) + + properties["search_tokens"] = search_result_context.context_size() + + completion = None + if ( + search_result_context.context_size() > self.max_search_tokens + or search_result_context.span_count() > self.max_hits + ): + logger.info( + f"{self.name}: Search too large. {properties['search_tokens']} tokens and {search_result_context.span_count()} hits, will ask for clarification." + ) + view_context, completion = self._identify_code(args, search_result_context) + else: + view_context = search_result_context + + span_count = search_result_context.span_count() + search_result_str = f"Found {span_count} code sections." + + if view_context.is_empty(): + search_result_str += ( + "\n\nNone of the search results was relevant to the task." + ) + summary = "Didn't find any relevant code sections in the search results." + message = search_result_str + else: + summary = "Found relevant code sections in the search results." + search_result_str += "\n\nViewed relevant code:" + message = ( + search_result_str + + "\n" + + view_context.create_prompt( + show_span_ids=False, + show_line_numbers=True, + exclude_comments=False, + show_outcommented_code=True, + ) + ) + + new_span_ids = file_context.add_file_context(view_context) + properties["new_span_ids"] = new_span_ids + + logger.info( + f"{self.name}: Found {span_count} code sections in search results. Viewed {view_context.span_count()} code sections." 
+ ) + + return Observation( + message=message, + summary=summary, + properties=properties, + execution_completion=completion, + ) + + def _search_for_context(self, args: SearchBaseArgs) -> FileContext: + search_result = self._search(args) + if not search_result.hits: + search_result = self._search_for_alternative_suggestion(args) + logger.info( + f"{self.name}: No relevant search results found. Will use alternative suggestion with {search_result.hits} hits." + ) + + span_count = 0 + search_result_context = FileContext(repo=self._repository) + for hit in search_result.hits: + span_count += len(hit.spans) + for span in hit.spans: + search_result_context.add_span_to_context( + hit.file_path, span.span_id, add_extra=True + ) + + return search_result_context + + def _select_span_instructions(self, search_result: SearchCodeResponse) -> str: + if not self.add_to_context: + return f"Here's the search result with the first line of codes in each code block. Use ViewCode to view specific code sections. " + + return f"The search result is too large. You must identify the relevant code sections in the search results to use them. " + + def _select_span_response_prompt(self, search_result: SearchCodeResponse) -> str: + search_result_context = FileContext(repo=self._repository) + for hit in search_result.hits: + for span in hit.spans: + search_result_context.add_span_to_context( + hit.file_path, span.span_id, add_extra=False + ) + + search_result_str = search_result_context.create_prompt( + show_span_ids=False, + show_line_numbers=True, + exclude_comments=False, + show_outcommented_code=True, + outcomment_code_comment="...", + # only_signatures=True + ) + + prompt = self._select_span_instructions(search_result) + prompt += f"\n\n{search_result_str}\n\n" + return prompt + + def _search(self, args: SearchBaseArgs) -> SearchCodeResponse: + raise NotImplementedError("Subclasses must implement this method.") + + def _search_for_alternative_suggestion( + self, args: SearchBaseArgs + ) -> SearchCodeResponse: + return SearchCodeResponse() + + def _identify_code( + self, args: SearchBaseArgs, search_result_ctx: FileContext + ) -> Tuple[IdentifiedSpans, Completion]: + search_result_str = search_result_ctx.create_prompt( + show_span_ids=True, + show_line_numbers=True, + exclude_comments=False, + show_outcommented_code=True, + outcomment_code_comment="...", + ) + + content = "Search request:" + content += f"\n{args.to_prompt()}" + + content += "\n\nIdentify the relevant code sections in the search results to use them. " + content += f"\n\n\n{search_result_str}\n\n" + identify_message = UserMessage(content=content) + + messages = [identify_message] + completion = None + + MAX_RETRIES = 3 + for retry_attempt in range(MAX_RETRIES): + identified_code, completion = self.completion_model.create_completion( + messages=messages, + system_prompt=IDENTIFY_SYSTEM_PROMPT, + response_model=Identify, + ) + logger.info( + f"Identifying relevant code sections. 
Attempt {retry_attempt + 1} of {MAX_RETRIES}.\n{identified_code.identified_spans}" + ) + + view_context = FileContext(repo=self._repository) + if identified_code.identified_spans: + for identified_spans in identified_code.identified_spans: + view_context.add_line_span_to_context( + identified_spans.file_path, + identified_spans.start_line, + identified_spans.end_line, + add_extra=True, + ) + else: + return view_context, completion + + tokens = view_context.context_size() + + if tokens > self.max_identify_tokens: + logger.info( + f"Identified code sections are too large ({tokens} tokens)." + ) + + messages.append( + AssistantMessage(content=identified_code.model_dump_json()) + ) + + messages.append( + UserMessage( + content=f"The identified code sections are too large ({tokens} tokens). Maximum allowed is {self.max_search_tokens} tokens. " + f"Please identify a smaller subset of the most relevant code sections." + ) + ) + else: + logger.info( + f"Identified code sections are within the token limit ({tokens} tokens)." + ) + return view_context, completion + + # If we've exhausted all retries and still too large + raise CompletionRejectError( + f"Unable to reduce code selection to under {self.max_search_tokens} tokens after {MAX_RETRIES} attempts", + last_completion=completion, + ) + + @classmethod + def model_validate(cls, obj: Any) -> "SearchBaseAction": + if isinstance(obj, dict): + obj = obj.copy() + repository = obj.pop("repository") + code_index = obj.pop("code_index") + return cls(code_index=code_index, repository=repository, **obj) + return super().model_validate(obj) diff --git a/moatless/actions/semantic_search.py b/moatless/actions/semantic_search.py new file mode 100644 index 00000000..e88e339b --- /dev/null +++ b/moatless/actions/semantic_search.py @@ -0,0 +1,104 @@ +from typing import Optional, List, Type, ClassVar + +from pydantic import Field, model_validator + +from moatless.actions.model import ActionArguments, FewShotExample +from moatless.actions.search_base import SearchBaseAction, SearchBaseArgs +from moatless.index.types import SearchCodeResponse + + +class SemanticSearchArgs(SearchBaseArgs): + """Use this when you don't know exact names or code but want to find related functionality. + + Perfect for: + - Finding functionality by description: query="code that handles password hashing" + - Finding related test cases: query="tests for user registration", category="test" + - Finding implementations: query="database connection pooling", category="implementation" + - Finding patterns: query="error handling for API requests" + + This is the most flexible search when you: + - Don't know exact function/class names + - Want to find similar implementations + - Need to discover related code + - Want to explore how certain features are implemented + """ + + query: str = Field( + ..., description="Natural language description of what you're looking for." + ) + category: Optional[str] = Field( + None, + description="The category of files to search for. 
This can be 'implementation' for core implementation files or 'test' for test files.", + ) + + class Config: + title = "SemanticSearch" + + def to_prompt(self): + prompt = f"Searching for code using the query: {self.query}" + if self.file_pattern: + prompt += f" in files matching the pattern: {self.file_pattern}" + return prompt + + @model_validator(mode="after") + def validate_query(self) -> "SemanticSearchArgs": + if not self.query.strip(): + raise ValueError("query cannot be empty") + return self + + +class SemanticSearch(SearchBaseAction): + args_schema: ClassVar[Type[ActionArguments]] = SemanticSearchArgs + + def _search(self, args: SemanticSearchArgs) -> SearchCodeResponse: + return self._code_index.semantic_search( + args.query, + file_pattern=args.file_pattern, + max_results=self.max_hits, + category=args.category, + ) + + def _search_for_alternative_suggestion( + self, args: SemanticSearchArgs + ) -> SearchCodeResponse: + if args.file_pattern: + return self._code_index.semantic_search( + args.query, + max_results=self.max_hits, + category=args.category, + ) + + return SearchCodeResponse() + + @classmethod + def get_evaluation_criteria(cls, trajectory_length: int | None = None) -> List[str]: + criteria = super().get_evaluation_criteria(trajectory_length) + criteria.extend( + [ + "Query Relevance: Evaluate if the search query is well-defined and likely to find relevant code.", + "Category Appropriateness: Assess if the category (implementation or test) aligns with the search intent.", + ] + ) + return criteria + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="Find all implementations of database connection pooling in our codebase", + action=SemanticSearchArgs( + scratch_pad="To find implementations of database connection pooling, we should search for code related to managing database connections efficiently. This might include classes or functions that handle connection creation, reuse, and management.", + query="database connection pooling implementation", + category="implementation", + ), + ), + FewShotExample.create( + user_input="We need to find all test cases related to user authentication in our test suite", + action=SemanticSearchArgs( + scratch_pad="To find test cases related to user authentication, we should search for test files that contain assertions and scenarios specifically testing authentication functionality.", + query="user authentication test cases", + file_pattern="tests/*.py", + category="test", + ), + ), + ] diff --git a/moatless/actions/string_replace.py b/moatless/actions/string_replace.py new file mode 100644 index 00000000..bba5fae1 --- /dev/null +++ b/moatless/actions/string_replace.py @@ -0,0 +1,370 @@ +import logging +import re +from typing import List + +from pydantic import Field, model_validator + +from moatless.actions.action import Action +from moatless.actions.code_modification_mixin import CodeModificationMixin +from moatless.actions.model import ( + ActionArguments, + Observation, + FewShotExample, + RetryException, +) +from moatless.file_context import FileContext +from moatless.index.code_index import CodeIndex +from moatless.repository.file import do_diff +from moatless.repository.repository import Repository +from moatless.runtime.runtime import RuntimeEnvironment + +logger = logging.getLogger(__name__) + +SNIPPET_LINES = 4 + + +class StringReplaceArgs(ActionArguments): + """ + Replace text in a file with exact string matching. 
+ + Notes: + * The old_str parameter must match EXACTLY one or more consecutive lines from the original file + * Whitespace and indentation must match exactly + * The old_str must be unique within the file - include enough surrounding context to ensure uniqueness + * The new_str parameter contains the replacement text that will replace old_str + * No changes will be made if old_str appears multiple times or cannot be found + * Do not include line numbers in old_str or new_str - provide only the actual code content + """ + + path: str = Field(..., description="Path to the file to edit") + old_str: str = Field( + ..., + description="Exact string from the file to replace - must match exactly, be unique, include proper indentation, and contain no line numbers", + ) + new_str: str = Field( + ..., + description="New string to replace the old_str with - must use proper indentation and contain no line numbers", + ) + + @model_validator(mode="after") + def validate_args(self) -> "StringReplaceArgs": + if not self.path.strip(): + raise ValueError("path cannot be empty") + if not self.old_str.strip(): + raise ValueError("old_str cannot be empty") + + def remove_line_numbers(text: str) -> str: + lines = text.split("\n") + # Pattern to match line numbers at start of line + line_number_pattern = r"^\s*\d+" + + # Remove line numbers if found + cleaned_lines = [re.sub(line_number_pattern, "", line) for line in lines] + return "\n".join(cleaned_lines) + + self.old_str = remove_line_numbers(self.old_str) + self.new_str = remove_line_numbers(self.new_str) + + return self + + class Config: + title = "StringReplace" + + +class StringReplace(Action, CodeModificationMixin): + """ + Action to replace strings in a file. + """ + + args_schema = StringReplaceArgs + + def __init__( + self, + runtime: RuntimeEnvironment | None = None, + code_index: CodeIndex | None = None, + repository: Repository | None = None, + **data, + ): + super().__init__(**data) + # Initialize mixin attributes directly + object.__setattr__(self, "_runtime", runtime) + object.__setattr__(self, "_code_index", code_index) + object.__setattr__(self, "_repository", repository) + + def execute( + self, args: StringReplaceArgs, file_context: FileContext + ) -> Observation: + path_str = self.normalize_path(args.path) + path, error = self.validate_file_access(path_str, file_context) + if error: + return error + + context_file = file_context.get_context_file(str(path)) + file_content = context_file.content.expandtabs() + logger.info(f"Editing file {path}\n{file_content}") + old_str = args.old_str.expandtabs() + new_str = args.new_str.expandtabs() + + if old_str == new_str: + return Observation( + message=f"The old_str and new_str are the same. No changes were made.", + properties={"fail_reason": "no_changes"}, + ) + + # Use find_exact_matches instead of inline code + exact_matches = find_exact_matches(old_str, file_content) + + if len(exact_matches) == 0: + potential_matches = find_potential_matches(old_str, file_content) + + if len(potential_matches) == 1: + match = potential_matches[0] + match_content = match["content"] + + message = ( + f"No changes were made. The provided old_str was not found, but a similar code block was found. 
" + f"To replace this code, the old_str must match exactly:\n\n```\n{match_content}\n```\n\n" + ) + + if match["diff_reason"] == "indentation_differs": + first_line_match = match_content.splitlines()[0] + first_line_old = old_str.splitlines()[0] + match_indent = len(first_line_match) - len( + first_line_match.lstrip() + ) + provided_indent = len(first_line_old) - len(first_line_old.lstrip()) + + message += ( + f"The content matches but the indentation is different. " + f"The actual code has {match_indent} spaces but your old_str has {provided_indent} spaces. " + f"Please update old_str to match the exact indentation shown above." + ) + elif match["diff_reason"] == "line_breaks_differs": + message += "The content matches but the line breaks are different. Please update old_str to match the exact line breaks shown above." + + raise RetryException(message, args) + elif len(potential_matches) > 1: + matches_info = "\n".join( + f"- Lines {m['start_line']}-{m['end_line']} ({m['diff_reason']})" + for m in potential_matches + ) + raise RetryException( + message=f"Multiple potential matches found with different formatting:\n{matches_info}\nTry including more surrounding context to create a unique match.", + action_args=args, + ) + + # If no matches found at all + new_str_occurrences = file_content.count(new_str) + if new_str_occurrences > 0: + return Observation( + message=f"New string '{new_str}' already exists in {path}. No changes were made.", + properties={"fail_reason": "string_already_exists"}, + ) + + return Observation( + message=f"String '{old_str}' not found in {path}", + properties={"fail_reason": "string_not_found"}, + expect_correction=True, + ) + elif len(exact_matches) > 1: + matches_info = "\n".join(f"- Line {m['start_line']}" for m in exact_matches) + return Observation( + message=f"Multiple occurrences of string found:\n{matches_info}\nTry including more surrounding lines to create a unique match.", + properties={"fail_reason": "multiple_occurrences"}, + expect_correction=True, + ) + + properties = {} + + match = exact_matches[0] + start_line = match["start_line"] - 1 # Convert to 0-based index + end_line = match["end_line"] - 1 + + # Check if the lines to be modified are in context + if not context_file.lines_is_in_context(start_line, end_line): + properties["flags"] = ["lines_not_in_context"] + logger.warning( + f"Lines {start_line + 1}-{end_line + 1} are not in context for {path}" + ) + + new_file_content = file_content.replace(old_str, new_str) + diff = do_diff(str(path), file_content, new_file_content) + + context_file.apply_changes(new_file_content) + + # Create a snippet of the edited section + snippet_start_line = max(0, start_line - SNIPPET_LINES - 1) + end_line = start_line + SNIPPET_LINES + new_str.count("\n") + snippet = "\n".join(new_file_content.split("\n")[snippet_start_line:end_line]) + + snippet_with_lines = self.format_snippet_with_lines(snippet, start_line + 1) + + success_msg = ( + f"The file {path} has been edited. Here's the result of running `cat -n` " + f"on a snippet of {path}:\n{snippet_with_lines}\n" + "Review the changes and make sure they are as expected. Edit the file again if necessary." 
+ ) + + properties["diff"] = diff + + observation = Observation( + message=success_msg, + properties=properties, + ) + + return self.run_tests_and_update_observation( + observation=observation, + file_path=str(path), + scratch_pad=args.scratch_pad, + file_context=file_context, + ) + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="Update the error message in the validate_user method", + action=StringReplaceArgs( + scratch_pad="Improving the error message to be more descriptive", + path="auth/validator.py", + old_str=""" if not user.is_active: + raise ValueError("Invalid user") + return user""", + new_str=""" if not user.is_active: + raise ValueError(f"Invalid user: {username} does not meet the required criteria") + return user""", + ), + ), + FewShotExample.create( + user_input="Update the logging configuration", + action=StringReplaceArgs( + scratch_pad="Enhancing the logging configuration with more detailed format and file handler", + path="utils/logger.py", + old_str="""logging.basicConfig( + level=logging.INFO, + format="%(levelname)s - %(message)s" +)""", + new_str="""logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + handlers=[ + logging.FileHandler("app.log"), + logging.StreamHandler() + ] +)""", + ), + ), + FewShotExample.create( + user_input="Update the user validation logic", + action=StringReplaceArgs( + scratch_pad="Adding email validation and password strength check", + path="auth/validator.py", + old_str="""def validate_user(username, password): + if len(username) < 3: + return False + if len(password) < 8: + return False + return True""", + new_str="""def validate_user(username, password): + if len(username) < 3 or not is_valid_email(username): + return False + if len(password) < 12 or not has_special_chars(password): + return False + if not has_numbers(password): + return False + return True""", + ), + ), + ] + + +def normalize_indentation(s): + return "\n".join(line.strip() for line in s.splitlines()) + + +def normalize_line_breaks(s): + # Remove all whitespace and line breaks + return "".join(line.strip() for line in s.replace(" ", "").splitlines()) + + +def find_potential_matches(old_str, new_content): + matches = [] + content_lines = new_content.splitlines() + old_str_lines = old_str.splitlines() + window_size = len(old_str_lines) + + # Pre-compute normalized versions of old_str + old_str_no_breaks = normalize_line_breaks(old_str) + old_str_no_indent = normalize_indentation(old_str) + + # First pass: find indentation matches using fixed window size + indentation_matches = [] + for start_idx in range(len(content_lines) - window_size + 1): + window = "\n".join(content_lines[start_idx : start_idx + window_size]) + window_no_indent = normalize_indentation(window) + + if window_no_indent == old_str_no_indent: + indentation_matches.append( + { + "start_line": start_idx + 1, + "end_line": start_idx + window_size, + "content": window, + "diff_reason": "indentation_differs", + } + ) + + # If we found indentation matches, return those only + if indentation_matches: + return indentation_matches + + # Second pass: find line break matches only if no indentation matches were found + start_idx = 0 + while start_idx < len(content_lines): + if not content_lines[start_idx].strip(): + start_idx += 1 + continue + + found_match = False + for end_idx in range(start_idx + 1, min(start_idx + 5, len(content_lines) + 1)): + window = 
"\n".join(content_lines[start_idx:end_idx]) + window_no_breaks = normalize_line_breaks(window) + + if window_no_breaks == old_str_no_breaks: + matches.append( + { + "start_line": start_idx + 1, + "end_line": end_idx, + "content": window, + "diff_reason": "line_breaks_differ", + } + ) + start_idx = end_idx # Skip to the end of this window + found_match = True + break + + if not found_match: + start_idx += 1 # Only increment by 1 if no match was found + + return matches + + +def find_exact_matches(old_str: str, file_content: str) -> list[dict]: + """Find exact matches of old_str in file_content, preserving line numbers.""" + file_lines = file_content.splitlines() + old_str_lines = old_str.splitlines() + matches = [] + + # Check each possible starting position in the file + for i in range(len(file_lines) - len(old_str_lines) + 1): + potential_match = "\n".join(file_lines[i : i + len(old_str_lines)]) + if potential_match == old_str: + matches.append( + { + "start_line": i + 1, + "end_line": i + len(old_str_lines), + "content": potential_match, + "diff_reason": "exact_match", + } + ) + + return matches diff --git a/moatless/actions/view_code.py b/moatless/actions/view_code.py new file mode 100644 index 00000000..528c230f --- /dev/null +++ b/moatless/actions/view_code.py @@ -0,0 +1,301 @@ +import logging +from typing import List, Optional + +from pydantic import Field, BaseModel, PrivateAttr + +from moatless.actions.action import Action +from moatless.actions.model import ( + ActionArguments, + FewShotExample, + Observation, + RetryException +) +from moatless.codeblocks import CodeBlockType +from moatless.file_context import FileContext, ContextFile +from moatless.repository.repository import Repository + +logger = logging.getLogger(__name__) + + +class CodeSpan(BaseModel): + file_path: str = Field( + description="The file path where the relevant code is found." + ) + start_line: Optional[int] = Field( + None, description="The start line of the code to add to context." + ) + end_line: Optional[int] = Field( + None, description="The end line of the code to add to context." + ) + span_ids: list[str] = Field( + default_factory=list, + description="Span IDs identiying the relevant code spans. A span id is a unique identifier for a code sippet. It can be a class name or function name. For functions in classes separete with a dot like 'class.function'.", + ) + + @property + def log_name(self): + log = self.file_path + + if self.start_line and self.end_line: + log += f" {self.start_line}-{self.end_line}" + + if self.span_ids: + log += f" {', '.join(self.span_ids)}" + + return log + + +class ViewCodeArgs(ActionArguments): + """View the code in a file or a specific code span.""" + + scratch_pad: str = Field(..., description="Your thoughts on the code change.") + files: List[CodeSpan] = Field( + ..., description="The code that should be provided in the file context." 
+ ) + + class Config: + title = "ViewCode" + + @property + def log_name(self): + if len(self.files) == 1: + return f"ViewCode({self.files[0].log_name})" + else: + logs = [] + for i, file in enumerate(self.files): + logs.append(f"{i}=[{file.log_name}]") + return f"ViewCode(" + ", ".join(logs) + ")" + + def to_prompt(self): + prompt = "Show the following code:\n" + for file in self.files: + prompt += f"* {file.file_path}\n" + if file.start_line and file.end_line: + prompt += f" Lines: {file.start_line}-{file.end_line}\n" + if file.span_ids: + prompt += f" Spans: {', '.join(file.span_ids)}\n" + return prompt + + +class ViewCode(Action): + args_schema = ViewCodeArgs + + _repository: Repository = PrivateAttr() + + def __init__(self, repository: Repository | None = None, **data): + super().__init__(**data) + self._repository = repository + + max_tokens: int = Field( + 2000, + description="The maximum number of tokens in the requested code.", + ) + + def execute(self, args: ViewCodeArgs, file_context: FileContext) -> Observation: + # Group files by filepath and combine span_ids + grouped_files = {} + for file_with_spans in args.files: + if file_with_spans.file_path not in grouped_files: + grouped_files[file_with_spans.file_path] = file_with_spans + else: + grouped_files[file_with_spans.file_path].span_ids.extend( + file_with_spans.span_ids + ) + + properties = {"files": {}} + message = "" + + # Validate all file spans before processing + for file_path, file_span in grouped_files.items(): + logger.info( + f"Processing file {file_path} with span_ids {file_span.span_ids}" + ) + file = file_context.get_file(file_path) + + if not file: + message = f"The requested file {file_path} is not found in the file repository. Use the search functions to search for the code if you are unsure of the file path." + properties["fail_reason"] = "file_not_found" + return Observation( + message=message, properties=properties, expect_correction=False + ) + + if self._repository.is_directory(file_path): + message = f"The requested file {file_path} is a directory and not a file. Use the search functions to search for the code if you are unsure of the file path." + properties["fail_reason"] = "is_directory" + return Observation( + message=message, properties=properties, expect_correction=False + ) + + view_context = FileContext(repo=self._repository) + + for file_path, file_span in grouped_files.items(): + file = file_context.get_file(file_path) + + if file_span.span_ids: + missing_span_ids = set() + suggested_span_ids = set() + found_span_ids = set() + if file_span.span_ids and not file.module: + logger.warning( + f"Tried to add span ids {file_span.span_ids} to not parsed file {file.file_path}." + ) + message += self.create_retry_message( + file, f"No span ids found. Is it empty?" 
+ ) + properties["fail_reason"] = "invalid_file" + raise RetryException(message=message, action_args=args) + + for span_id in file_span.span_ids: + span_ids = set() + block_span = file.module.find_span_by_id(span_id) + if not block_span: + # Try to find the relevant code block by code block identifier + block_identifier = span_id.split(".")[-1] + blocks = file.module.find_blocks_with_identifier( + block_identifier + ) + + if not blocks: + missing_span_ids.add(span_id) + elif len(blocks) > 1: + for block in blocks: + if ( + block.belongs_to_span.span_id + not in suggested_span_ids + ): + suggested_span_ids.add( + block.belongs_to_span.span_id + ) + else: + block_span = blocks[0].belongs_to_span + + if block_span: + if block_span.initiating_block.type == CodeBlockType.CLASS: + class_block = block_span.initiating_block + found_span_ids.add(block_span.span_id) + class_tokens = class_block.sum_tokens() + + view_context.add_spans_to_context( + file_path, class_block.get_all_span_ids() + ) + + else: + view_context.add_span_to_context( + file_path, block_span.span_id, add_extra=False + ) + + elif file_span.start_line: + view_context.add_line_span_to_context( + file_path, file_span.start_line, file_span.end_line, add_extra=False + ) + else: + view_context.add_file(file_path, show_all_spans=True) + + if view_context.context_size() > self.max_tokens: + content = view_context.create_prompt( + show_span_ids=False, + show_line_numbers=True, + show_outcommented_code=True, + outcomment_code_comment="...", + only_signatures=True, + ) + raise RetryException( + message=f"The request code is too large ({view_context.context_size()} tokens) to view in its entirety. Maximum allowed is {self.max_tokens} tokens. " + f"Please specify the functions or classes to view.\n" + f"Here's a structure of the requested code spans\n: {content}", + action_args=args, + ) + + if view_context.is_empty(): + message += f"\nThe specified code spans wasn't found." + properties["fail_reason"] = "no_spans_found" + else: + message += "Here's the contents of the requested code spans:\n" + message += view_context.create_prompt( + show_span_ids=False, + show_line_numbers=True, + exclude_comments=False, + show_outcommented_code=True, + outcomment_code_comment="...", + ) + + new_span_ids = file_context.add_file_context(view_context) + if not new_span_ids: + properties["fail_reason"] = "no_spans_added" + + properties["files"][file_path] = { + "new_span_ids": list(new_span_ids), + } + + summary = f"Showed the following code spans:\n" + file_context.create_summary() + + return Observation( + message=message, + summary=summary, + properties=properties, + expect_correction=False, + ) + + def create_retry_message(self, file: ContextFile, message: str): + retry_message = f"\n\nProblems when trying to find spans in {file.file_path}. 
" + retry_message += message + + hint = self.create_hint(file) + if hint: + retry_message += f"\n\n{hint}" + + if file.module and file.span_ids: + search_result_context = FileContext(repo=self._repository) + search_result_context.add_file(file.file_path, show_all_spans=True) + + search_result_str = search_result_context.create_prompt( + show_span_ids=False, + show_line_numbers=False, + exclude_comments=False, + show_outcommented_code=True, + outcomment_code_comment="...", + only_signatures=True, + ) + retry_message += f"\n\nHere's the code structure:\n{search_result_str}" + + return retry_message + + def create_hint(self, file: ContextFile): + if "test" in file.file_path: + return "If you want to write a new test method, start by adding one of the existing ones that might relevant for reference." + + return None + + def span_id_list(self, span_ids: set[str]) -> str: + list_str = "" + for span_id in span_ids: + list_str += f" * {span_id}\n" + return list_str + + @classmethod + def get_few_shot_examples(cls) -> List[FewShotExample]: + return [ + FewShotExample.create( + user_input="I need to see the implementation of the authenticate method in the AuthenticationService class", + action=ViewCodeArgs( + scratch_pad="To understand the authentication implementation, we need to examine the authenticate method within the AuthenticationService class.", + files=[ + CodeSpan( + file_path="auth/service.py", + span_ids=["AuthenticationService.authenticate"], + ) + ], + ), + ), + FewShotExample.create( + user_input="Show me lines 50-75 of the database configuration file", + action=ViewCodeArgs( + scratch_pad="To examine the database configuration settings, we'll look at the specified line range in the config file.", + files=[ + CodeSpan( + file_path="config/database.py", start_line=50, end_line=75 + ) + ], + ), + ), + ] diff --git a/moatless/agent/__init__.py b/moatless/agent/__init__.py new file mode 100644 index 00000000..da259db1 --- /dev/null +++ b/moatless/agent/__init__.py @@ -0,0 +1,2 @@ +from moatless.agent.code_agent import ActionAgent +from moatless.agent.code_agent import CodingAgent diff --git a/moatless/agent/agent.py b/moatless/agent/agent.py new file mode 100644 index 00000000..d3b22f76 --- /dev/null +++ b/moatless/agent/agent.py @@ -0,0 +1,277 @@ +import importlib +import logging +from typing import List, Type, Dict, Any, Optional + +from pydantic import BaseModel, Field, PrivateAttr, model_validator, ValidationError + +from moatless.actions.action import Action +from moatless.actions.model import ( + ActionArguments, + Observation, + RetryException, + ActionError, +) +from moatless.completion.completion import CompletionModel +from moatless.completion.model import AssistantMessage, UserMessage, Completion +from moatless.exceptions import RuntimeError, CompletionRejectError +from moatless.index.code_index import CodeIndex +from moatless.node import Node, MessageHistoryType +from moatless.repository.repository import Repository + +logger = logging.getLogger(__name__) + + +class ActionAgent(BaseModel): + system_prompt: Optional[str] = Field( + None, description="System prompt to be used for generating completions" + ) + actions: List[Action] = Field(default_factory=list) + message_history_type: MessageHistoryType = Field( + default=MessageHistoryType.MESSAGES, + description="Determines how message history is generated", + ) + include_extra_history: bool = Field( + default=True, + description="Whether to include extra execution details in message history", + ) + include_file_context: bool = 
Field( + default=False, + description="Whether to include the full file context in the last message", + ) + include_git_patch: bool = Field( + default=False, + description="Whether to include the full git patch in the last message", + ) + + _completion: CompletionModel = PrivateAttr() + _action_map: dict[Type[ActionArguments], Action] = PrivateAttr(default_factory=dict) + + def __init__( + self, + completion: CompletionModel, + system_prompt: str | None = None, + actions: List[Action] | None = None, + **data, + ): + actions = actions or [] + super().__init__(actions=actions, system_prompt=system_prompt, **data) + self.set_actions(actions) + self._completion = completion + + def set_actions(self, actions: List[Action]): + self.actions = actions + self._action_map = {action.args_schema: action for action in actions} + + @model_validator(mode="after") + def verify_system_prompt(self) -> "ActionAgent": + if self.system_prompt == "": + self.system_prompt = None + return self + + @model_validator(mode="after") + def verify_actions(self) -> "ActionAgent": + for action in self.actions: + if not isinstance(action, Action): + raise ValidationError( + f"Invalid action type: {type(action)}. Expected Action subclass." + ) + if not hasattr(action, "args_schema"): + raise ValidationError( + f"Action {action.__class__.__name__} is missing args_schema attribute" + ) + return self + + def run(self, node: Node): + """Run the agent on a node to generate and execute an action.""" + + if node.action: + logger.info(f"Node{node.node_id}: Resetting node") + node.reset() + + logger.info(node.file_context.model_dump()) + + possible_actions = self.determine_possible_actions(node) + if not possible_actions: + raise RuntimeError(f"No possible actions for Node{node.node_id}") + node.possible_actions = [action.name for action in possible_actions] + system_prompt = self.generate_system_prompt(possible_actions) + action_args = [action.args_schema for action in possible_actions] + + messages = node.generate_message_history( + message_history_type=self.message_history_type + ) + + max_attempts = 3 + for attempt in range(max_attempts): + logger.info( + f"Node{node.node_id}: Run attempt {attempt + 1} of {max_attempts}" + ) + try: + node.action, completion_response = self._completion.create_completion( + messages, system_prompt=system_prompt, response_model=action_args + ) + node.completions["build_action"] = completion_response + except CompletionRejectError as e: + node.action = ActionError( + error=f"Failed to generate action. Error: {e}" + ) + + if e.last_completion: + # TODO: Move mapping to completion.py + node.completions["build_action"] = Completion.from_llm_completion( + input_messages=e.messages, + completion_response=e.last_completion, + model=self.completion.model, + ) + + node.observation = Observation( + message=e.message, + is_terminal=True, + properties={"error": str(e), "retries": attempt}, + ) + return + + duplicate_node = node.find_duplicate() + if duplicate_node: + node.is_duplicate = True + logger.info( + f"Node{node.node_id} is a duplicate to Node{duplicate_node.node_id}. Skipping execution." + ) + return + try: + node.observation = self._execute(node) + if node.observation.execution_completion: + node.completions["execute_action"] = ( + node.observation.execution_completion + ) + + if attempt > 0: + node.observation.properties["retries"] = attempt + + logger.info( + f"Node{node.node_id}: Executed action: {node.action.name}. " + f"Terminal: {node.observation.terminal if node.observation else False}. 
" + f"Output: {node.observation.message if node.observation else None}" + ) + + return + + except RetryException as e: + logger.warning( + f"Node{node.node_id}: Action needs retry (attempt {attempt + 1}): {e.message}" + ) + + messages.append( + AssistantMessage(tool_call=e.action_args.to_tool_call()) + ) + messages.append(UserMessage(content=e.message)) + if attempt == max_attempts - 1: + node.observation = Observation( + message=e.message, + is_terminal=True, + properties={"retries": attempt}, + ) + return + except CompletionRejectError as e: + logger.warning(f"Node{node.node_id}: Action rejected: {e.message}") + node.completions["execute_action"] = e.last_completion + node.observation = Observation( + message=e.message, + is_terminal=True, + properties={"retries": attempt}, + ) + return + + def _execute(self, node: Node): + action = self._action_map.get(type(node.action)) + if not action: + logger.error( + f"Node{node.node_id}: Action {node.action.name} not found in action map. " + f"Available actions: {self._action_map.keys()}" + ) + raise RuntimeError(f"Action {type(node.action)} not found in action map.") + + return action.execute(node.action, node.file_context) + + def determine_possible_actions(self, node: Node) -> List[Action]: + """Determine which actions that the agent can take based on the current node state.""" + actions = self.actions + logger.debug( + f"Possible actions for Node{node.node_id}: {[action.__class__.__name__ for action in actions]}" + ) + return actions + + def generate_system_prompt(self, possible_actions: List[Action]) -> str: + """Generate a system prompt for the agent.""" + return self.system_prompt + + def model_dump(self, **kwargs) -> Dict[str, Any]: + dump = super().model_dump(**kwargs) + dump["completion"] = self._completion.model_dump(**kwargs) + dump["actions"] = [] + dump["agent_class"] = f"{self.__class__.__module__}.{self.__class__.__name__}" + dump["message_history_type"] = self.message_history_type.value + for action in self.actions: + action_dump = action.model_dump(**kwargs) + action_dump["action_class"] = ( + f"{action.__class__.__module__}.{action.__class__.__name__}" + ) + dump["actions"].append(action_dump) + return dump + + @classmethod + def model_validate( + cls, + obj: Any, + repository: Repository = None, + runtime: Any = None, + code_index: CodeIndex = None, + ) -> "ActionAgent": + if isinstance(obj, dict): + obj = obj.copy() + completion_data = obj.pop("completion", None) + agent_class_path = obj.pop("agent_class", None) + + if "message_history_type" in obj: + obj["message_history_type"] = MessageHistoryType( + obj["message_history_type"] + ) + + if completion_data: + obj["completion"] = CompletionModel.model_validate(completion_data) + else: + obj["completion"] = None + + if repository: + obj["actions"] = [ + Action.from_dict( + action_data, + repository=repository, + runtime=runtime, + code_index=code_index, + ) + for action_data in obj.get("actions", []) + ] + else: + logger.debug(f"No repository provided, skip initiating actions") + obj["actions"] = [] + + if agent_class_path: + module_name, class_name = agent_class_path.rsplit(".", 1) + module = importlib.import_module(module_name) + agent_class = getattr(module, class_name) + instance = agent_class(**obj) + else: + instance = cls(**obj) + + return instance + + return super().model_validate(obj) + + @property + def completion(self) -> CompletionModel: + return self._completion + + @completion.setter + def completion(self, value: CompletionModel): + self._completion = value diff 
--git a/moatless/agent/code_agent.py b/moatless/agent/code_agent.py new file mode 100644 index 00000000..e91b1e3e --- /dev/null +++ b/moatless/agent/code_agent.py @@ -0,0 +1,286 @@ +import json +import logging +from typing import List, Type + +from moatless.actions import ( + FindClass, + FindFunction, + FindCodeSnippet, + SemanticSearch, + ViewCode, +) +from moatless.actions.action import Action +from moatless.actions.apply_change_and_test import ApplyCodeChangeAndTest +from moatless.actions.code_change import RequestCodeChange +from moatless.actions.create_file import CreateFile +from moatless.actions.edit import ClaudeEditTool +from moatless.actions.finish import Finish +from moatless.actions.insert_line import InsertLine +from moatless.actions.reject import Reject +from moatless.actions.run_tests import RunTests +from moatless.actions.string_replace import StringReplace +from moatless.agent.agent import ActionAgent +from moatless.agent.code_prompts import ( + CLAUDE_REACT_PROMPT, + REACT_SYSTEM_PROMPT, + SYSTEM_PROMPT, + SIMPLE_CODE_PROMPT, + EDIT_SYSTEM_PROMPT, +) +from moatless.completion.completion import ( + LLMResponseFormat, + CompletionModel, +) +from moatless.index import CodeIndex +from moatless.node import Node, MessageHistoryType +from moatless.repository.repository import Repository +from moatless.runtime.runtime import RuntimeEnvironment + +logger = logging.getLogger(__name__) + + +class CodingAgent(ActionAgent): + def generate_system_prompt(self, possible_actions: List[Type[Action]]) -> str: + if self.system_prompt: + prompt = self.system_prompt + elif self.message_history_type == MessageHistoryType.REACT: + prompt = REACT_SYSTEM_PROMPT + elif any(action.name == "StringReplace" for action in possible_actions): + prompt = EDIT_SYSTEM_PROMPT + else: + prompt = SYSTEM_PROMPT + + few_shot_examples = [] + for action in possible_actions: + examples = action.get_few_shot_examples() + if examples: + few_shot_examples.extend(examples) + + if few_shot_examples: + prompt += "\n\n# Examples\nHere are some examples of how to use the available actions:\n\n" + for i, example in enumerate(few_shot_examples): + if self.completion.response_format == LLMResponseFormat.REACT: + prompt += f"\n**Example {i+1}**" + action_data = example.action.model_dump() + scratch_pad = action_data.pop("scratch_pad", "") + prompt += ( + f"\nTask: {example.user_input}" + f"Thought: {scratch_pad}\n" + f"Action: {example.action.name}\n" + f"Action Input: {json.dumps(action_data, indent=2)}\n\n" + ) + elif self.completion.response_format == LLMResponseFormat.JSON: + action_json = { + "action": example.action.model_dump(), + "action_type": example.action.name, + } + prompt += f"User: {example.user_input}\nAssistant:\n```json\n{json.dumps(action_json, indent=2)}\n```\n\n" + + return prompt + + def determine_possible_actions(self, node: Node) -> List[Action]: + possible_actions = self.actions.copy() + + # Remove RequestCodeChange and RunTests if there's no file context + if node.file_context.is_empty(): + possible_actions = [ + action + for action in possible_actions + if action.__class__ + not in [ + ApplyCodeChangeAndTest, + RequestCodeChange, + StringReplace, + CreateFile, + InsertLine, + RunTests, + ] + ] + + # Remove Finish and Reject if there's no file context or no code changes + if not node.file_context.has_patch(): + possible_actions = [ + action + for action in possible_actions + if action.__class__ not in [Finish, Reject] + ] + + # Remove Finish if a sibling has already finished + # possible_actions = 
self.filter_finished(node, possible_actions) + + logger.info( + f"Possible actions for Node{node.node_id}: {[action.__class__.__name__ for action in possible_actions]}" + ) + + return possible_actions + + def filter_finished(self, node: Node, possible_actions: List[Action]): + siblings = node.get_sibling_nodes() + has_finished = any(child.action.name == "Finish" for child in siblings) + if has_finished: + possible_actions = [ + action for action in possible_actions if action.name != "Finish" + ] + return possible_actions + + def filter_duplicates(self, node: Node, possible_actions: List[Action]): + # Remove actions that have been marked as duplicates + if node.parent: + siblings = [ + child for child in node.parent.children if child.node_id != node.node_id + ] + duplicate_actions = set( + child.action.name for child in siblings if child.is_duplicate + ) + possible_actions = [ + action + for action in possible_actions + if action.name not in duplicate_actions + ] + + return possible_actions + + @classmethod + def create( + cls, + repository: Repository, + completion_model: CompletionModel, + code_index: CodeIndex | None = None, + runtime: RuntimeEnvironment | None = None, + edit_completion_model: CompletionModel | None = None, + use_edit_actions: bool = False, + **kwargs, + ): + system_prompt = None + if completion_model.supports_anthropic_computer_use: + actions = create_claude_coding_actions( + repository=repository, + code_index=code_index, + runtime=runtime, + completion_model=completion_model, + ) + system_prompt = CLAUDE_REACT_PROMPT + elif use_edit_actions: + actions = create_edit_code_actions( + repository=repository, + code_index=code_index, + runtime=runtime, + completion_model=completion_model, + ) + + system_prompt = EDIT_SYSTEM_PROMPT + else: + actions = create_coding_actions( + repository=repository, + code_index=code_index, + runtime=runtime, + identify_completion_model=completion_model, + edit_completion_model=edit_completion_model or completion_model, + ) + + if not runtime: + system_prompt = SIMPLE_CODE_PROMPT + + return cls( + completion=completion_model, + actions=actions, + system_prompt=system_prompt, + include_extra_history=True, + include_file_context=False, + include_git_patch=False, + **kwargs, + ) + + +def create_base_actions( + repository: Repository, + code_index: CodeIndex | None = None, + completion_model: CompletionModel | None = None, +) -> List[Action]: + """Create the common base actions used across all action creators.""" + return [ + SemanticSearch( + code_index=code_index, + repository=repository, + completion_model=completion_model, + ), + FindClass( + code_index=code_index, + repository=repository, + completion_model=completion_model, + ), + FindFunction( + code_index=code_index, + repository=repository, + completion_model=completion_model, + ), + FindCodeSnippet( + code_index=code_index, + repository=repository, + completion_model=completion_model, + ), + ViewCode(repository=repository), + ] + + +def create_coding_actions( + repository: Repository, + code_index: CodeIndex | None = None, + runtime: RuntimeEnvironment | None = None, + identify_completion_model: CompletionModel | None = None, + edit_completion_model: CompletionModel | None = None, +) -> List[Action]: + actions = create_base_actions(repository, code_index, identify_completion_model) + + if runtime: + actions.append( + ApplyCodeChangeAndTest( + code_index=code_index, + repository=repository, + runtime=runtime, + completion_model=edit_completion_model, + ) + ) + else: + actions.append( + 
RequestCodeChange( + repository=repository, completion_model=edit_completion_model + ) + ) + + actions.extend([Finish(), Reject()]) + return actions + + +def create_edit_code_actions( + repository: Repository, + code_index: CodeIndex | None = None, + runtime: RuntimeEnvironment | None = None, + completion_model: CompletionModel | None = None, +) -> List[Action]: + """Create a list of simple code modification actions.""" + actions = create_base_actions(repository, code_index, completion_model) + + edit_actions = [ + StringReplace(repository=repository, runtime=runtime, code_index=code_index), + # InsertLine(repository=repository, runtime=runtime, code_index=code_index), + CreateFile(repository=repository, runtime=runtime, code_index=code_index), + ] + + actions.extend(edit_actions) + actions.extend([Finish(), Reject()]) + return actions + + +def create_claude_coding_actions( + repository: Repository, + code_index: CodeIndex | None = None, + runtime: RuntimeEnvironment | None = None, + completion_model: CompletionModel | None = None, +) -> List[Action]: + actions = create_base_actions(repository, code_index, completion_model) + actions.append( + ClaudeEditTool(code_index=code_index, repository=repository, runtime=runtime) + ) + actions.extend([Finish(), Reject()]) + return actions diff --git a/moatless/agent/code_prompts.py b/moatless/agent/code_prompts.py new file mode 100644 index 00000000..6e1f3996 --- /dev/null +++ b/moatless/agent/code_prompts.py @@ -0,0 +1,477 @@ +AGENT_ROLE = """You are an autonomous AI assistant with superior programming skills. Your role is to guide the +implementation process by providing detailed instructions for each step needed to solve the assigned task. +This includes searching for relevant code, analyzing requirements, planning changes, and providing implementation +details. As you're working autonomously, you cannot communicate with the user but must rely on information +you can get from the available functions. +""" + +SYSTEM_PROMPT = ( + AGENT_ROLE + + """ +# Workflow Overview +You will interact with an AI agent with limited programming capabilities, so it's crucial to include all necessary information for successful implementation. + +# Workflow Overview + +1. **Understand the Task** + * **Review the Task:** Carefully read the task provided in . + * **Identify Code to Change:** Analyze the task to determine which parts of the codebase need to be changed. + * **Identify Necessary Context:** Determine what additional parts of the codebase are needed to understand how to implement the changes. Consider dependencies, related components, and any code that interacts with the affected areas. + +2. **Locate Relevant Code** + * **Search for Code:** Use the search functions to find relevant code if it's not in the current context: + * FindClass + * FindFunction + * FindCodeSnippet + * SemanticSearch + * **View Code:** Use ViewCode to examine necessary code spans. + +3: **Locate Relevant Tests** + * **Locate Existing Tests Related to the Code Changes:** Use existing search functions with the category parameter set to 'test' to find relevant test code. + +4. **Plan Code Changes** + * **One Step at a Time:** You can only plan and implement one code change at a time. + * **Provide Instructions and Pseudo Code:** Use RequestCodeChange to specify the change. + * **Run Tests:** After each code change, use RunTests to verify that the change works as intended. + +5. 
**Modify or Add Tests**
+   * **Ensure Test Coverage:** After code changes, use RequestCodeChange to update or add tests to verify the changes.
+   * **Run Tests:** Use RunTests after test modifications to ensure that tests pass.
+
+6. **Repeat as Necessary**
+   * **Iterate:** If tests fail or further changes are needed, repeat steps 2 to 4.
+
+7. **Finish the Task**
+   * **Completion:** When confident that all changes are correct and the task is resolved, use Finish.
+
+# Important Guidelines
+
+ * **Focus on the Specific Task**
+   * Implement requirements exactly as specified, without additional changes.
+   * Do not modify code unrelated to the task.
+
+ * **Clear Communication**
+   * Provide detailed yet concise instructions.
+   * Include all necessary information for the AI agent to implement changes correctly.
+
+ * **Code Context and Changes**
+   * Limit code changes to files in the current context.
+   * If you need to examine more code, use ViewCode to see it.
+   * Provide line numbers if known; if unknown, explain where changes should be made.
+
+ * **Testing**
+   * Always update or add tests to verify your changes.
+   * Run tests after code modifications to ensure correctness.
+
+ * **Error Handling**
+   * If tests fail, analyze the output and plan necessary corrections.
+   * Document your reasoning in the scratch_pad when making function calls.
+
+ * **Task Completion**
+   * Finish the task only when the task is fully resolved and verified.
+   * Do not suggest code reviews or additional changes beyond the scope.
+
+# Additional Notes
+ * **Think step by step:** Always use the scratch_pad to document your reasoning and thought process.
+ * **Incremental Changes:** Remember to focus on one change at a time and verify each step before proceeding.
+ * **Never Guess:** Do not guess line numbers or code content. Use ViewCode to examine code when needed.
+ * **Collaboration:** The AI agent relies on your detailed instructions; clarity is key.
+"""
+)
+
+SIMPLE_CODE_PROMPT = (
+    AGENT_ROLE
+    + """
+## Workflow Overview
+
+1. **Understand the Task**
+   * Review the task provided in <task>
+   * Identify which code needs to change
+   * Determine what additional context is needed to implement changes
+
+2. **Locate Relevant Code**
+   * Use available search functions:
+     * FindClass
+     * FindFunction
+     * FindCodeSnippet
+     * SemanticSearch
+   * Use ViewCode to view necessary code spans
+
+3. **Plan and Execute Changes**
+   * Focus on one change at a time
+   * Provide detailed instructions and pseudo code
+   * Use RequestCodeChange to specify modifications
+   * Document reasoning in scratch_pad
+
+4. 
**Finish the Task** + * When confident changes are correct and task is resolved + * Use Finish command + +## Important Guidelines + +### Focus and Scope +* Implement requirements exactly as specified +* Do not modify unrelated code +* Stay within the bounds of the reported task + +### Communication +* Provide detailed yet concise instructions +* Include all necessary context for implementation +* Use scratch_pad to document reasoning + +### Code Modifications +* Only modify files in current context +* Request additional context explicitly when needed +* Provide specific locations for changes +* Make incremental, focused modifications + +### Best Practices +* Never guess at line numbers or code content +* Document reasoning for each change +* Focus on one modification at a time +* Provide clear implementation guidance +* Ensure changes directly address the task + +### Error Handling +* If implementation fails, analyze output +* Plan necessary corrections +* Document reasoning for adjustments + +Remember: The AI agent relies on your clear, detailed instructions for successful implementation. Maintain focus on the specific task and provide comprehensive guidance for each change. +""" +) + +CLAUDE_PROMPT = ( + AGENT_ROLE + + """ +# Workflow Overview +You will interact with an AI agent with limited programming capabilities, so it's crucial to include all necessary information for successful implementation. + +# Workflow Overview + +1. **Understand the Task** + * **Review the Task:** Carefully read the task provided in . + * **Identify Code to Change:** Analyze the task to determine which parts of the codebase need to be changed. + * **Identify Necessary Context:** Determine what additional parts of the codebase are needed to understand how to implement the changes. Consider dependencies, related components, and any code that interacts with the affected areas. + +2. **Locate Relevant Code** + * **Search for Code:** Use the search functions to find relevant code if it's not in the current context. + * **Request Additional Context:** Use ViewCode to view known code spans, like functions, classes or specific lines of code. + +3: **Locate Relevant Tests** + * **Locate Existing Tests Related to the Code Changes:** Use the search functions to find relevant test code. + +4. **Apply Code Changes** + * **One Step at a Time:** You can only plan and implement one code change at a time. + * **Provide Instructions and Pseudo Code:** Use the str_replace_editor tool to update the code. + * **Tests Run Automatically:** Tests will run automatically after each code change. + +5. **Modify or Add Tests** + * **Ensure Test Coverage:** After code changes, use the str_replace_editor tool to update or add tests to verify the changes. + +6. **Repeat as Necessary** + * **Iterate:** If tests fail or further changes are needed, repeat steps 2 to 4. + + 7: **Finish the Task** + * **Completion:** When confident that all changes are correct and the task is resolved, use Finish. + +# Important Guidelines + + * **Focus on the Specific task** + * Implement requirements exactly as specified, without additional changes. + * Do not modify code unrelated to the task. + + * **Clear Communication** + * Provide detailed yet concise instructions. + * Include all necessary information for the AI agent to implement changes correctly. + + * **Code Context and Changes** + * Limit code changes to files in the current context. + * If you need more code, request it explicitly. 
+ * Provide line numbers if known; if unknown, explain where changes should be made. + + * **Testing** + * Always update or add tests to verify your changes. + + * **Error Handling** + * If tests fail, analyze the output and plan necessary corrections. + * Document your reasoning in the scratch_pad when making function calls. + + * **Task Completion** + * Finish the task only when the task is fully resolved and verified. + * Do not suggest code reviews or additional changes beyond the scope. + +# Additional Notes + * **Think step by step:** Always write out your thoughts before making function calls. + * **Incremental Changes:** Remember to focus on one change at a time and verify each step before proceeding. + * **Never Guess:** Do not guess line numbers or code content. Use ViewCode to obtain accurate information. + * **Collaboration:** The AI agent relies on your detailed instructions; clarity is key. +""" +) + + +CLAUDE_REACT_PROMPT = """ +You will write your reasoning steps inside `` tags, and then perform actions by making function calls as needed. +After each action, you will receive an Observation that contains the result of your action. Use these observations to inform your next steps. + +## How to Interact + +- **Think Step by Step:** Use the ReAct pattern to reason about the task. Document each thought process within ``. +- **Function Calls:** After your thoughts, make the necessary function calls to interact with the codebase or environment. +- **Observations:** After each function call, you will receive an Observation containing the result. Use this information to plan your next step. +- **One Action at a Time:** Only perform one action before waiting for its Observation. + +## Workflow Overview + +1. **Understand the Task** + - **Review the Task:** Carefully read the task provided in ``. + - **Identify Code to Change:** Determine which parts of the codebase need modification. + - **Identify Necessary Context:** Figure out what additional code or information you need. + +2. **Locate Relevant Code** + - **Search for Code:** Use functions like `SearchCode` to find relevant code if it's not in the current context. + - **Request Additional Context:** Use `ViewCode` to view specific code spans, functions, classes, or lines of code. + +3. **Locate Relevant Tests** + - **Find Related Tests:** Use functions to locate existing tests related to the code changes. + +4. **Apply Code Changes** + - **One Step at a Time:** Plan and implement one code change at a time. + - **Provide Instructions and Pseudo Code:** Use `str_replace_editor` to update the code. + - **Automatic Testing:** Tests run automatically after each code change. + +5. **Modify or Add Tests** + - **Ensure Test Coverage:** Update or add tests to verify the changes using `str_replace_editor`. + +6. **Repeat as Necessary** + - **Iterate:** If tests fail or further changes are needed, repeat the steps above. + +7. **Finish the Task** + - **Completion:** When confident that all changes are correct and the task is resolved, use `Finish`. + +# Important Guidelines + +- **Focus on the Specific Task** + - Implement requirements exactly as specified. + - Do not modify unrelated code. + +- **Clear Communication** + - Provide detailed yet concise instructions. + - Include all necessary information for accurate implementation. + +- **Code Context and Changes** + - Limit changes to files in the current context. + - Explicitly request more code if needed. + - Provide line numbers if known; otherwise, explain where changes should be made. 
+ +- **Testing** + - Always update or add tests to verify your changes. + +- **Error Handling** + - If tests fail, analyze the output and plan corrections. + - Document your reasoning in `` before making function calls. + +- **Task Completion** + - Finish only when the task is fully resolved and verified. + - Do not suggest additional changes beyond the scope. + +# Additional Notes + +- **ReAct Pattern Usage:** Always write your thoughts in `` before making function calls. +- **Incremental Changes:** Focus on one change at a time and verify each step. +- **Never Guess:** Do not guess code content. Use `ViewCode` to obtain accurate information. +- **Collaboration:** The AI agent relies on your detailed instructions; clarity is key. +""" + +EDIT_SYSTEM_PROMPT = ( + AGENT_ROLE + + """ +# Workflow Overview +You will interact with an AI agent with limited programming capabilities, so it's crucial to include all necessary information for successful implementation. + +# Workflow Overview + +1. **Understand the Task** + * **Review the Task:** Carefully read the task provided in . + * **Identify Code to Change:** Analyze the task to determine which parts of the codebase need to be changed. + * **Identify Necessary Context:** Determine what additional parts of the codebase are needed to understand how to implement the changes. Consider dependencies, related components, and any code that interacts with the affected areas. + +2. **Locate Relevant Code** + * **Search for Code:** Use the search functions to find relevant code if it's not in the current context: + * FindClass + * FindFunction + * FindCodeSnippet + * SemanticSearch + * **View Code:** Use ViewCode to examine necessary code spans. + +3: **Locate Relevant Tests** + * **Locate Existing Tests Related to the Code Changes:** Use existing search functions with the category parameter set to 'test' to find relevant test code. + +4. **Apply Code Changes** + * **One Step at a Time:** You can only plan and implement one code change at a time. + * **Choose the Appropriate Action:** + * Use StringReplace to edit existing files + * Use CreateFile to create new files + * **Tests Run Automatically:** Tests will run automatically after each code change. + +5. **Modify or Add Tests** + * **Ensure Test Coverage:** After code changes, use the same actions to update or add tests to verify the changes. + * **Tests Run Automatically:** Tests will run automatically after test modifications. + +6. **Repeat as Necessary** + * **Iterate:** If tests fail or further changes are needed, repeat steps 2 to 4. + +7 **Finish the Task** + * **Completion:** When confident that all changes are correct and the task is resolved, use Finish. + +# Important Guidelines + + * Focus on the Specific Task + - Implement requirements exactly as specified, without additional changes. + - Do not modify code unrelated to the task. + + * One Action at a Time + - You must perform only ONE action before waiting for the result. Follow this strict sequence: + - Think through the next step + - Execute ONE action (search, context request, or code change) + - Wait for the result + - Plan the next step based on the result + + * Clear Communication + - Provide detailed yet concise instructions. + - Include all necessary information for the AI agent to implement changes correctly. + + * Code Context and Changes + - Limit code changes to files in the current context. + - If you need to examine more code, use ViewCode to see it. 
+    - Provide line numbers if known; if unknown, explain where changes should be made.
+
+ * Testing
+    - Tests run automatically after each code change.
+    - Always update or add tests to verify your changes.
+
+ * Error Handling
+    - If tests fail, analyze the output and plan necessary corrections.
+    - Document your reasoning in the scratch_pad when making function calls.
+
+ * Task Completion
+    - Finish the task only when the task is fully resolved and verified.
+    - Do not suggest code reviews or additional changes beyond the scope.
+
+ * State Management
+    - Keep a detailed record of all code sections you have viewed and actions you have taken.
+    - Before performing a new action, check your scratch_pad and history to ensure you are not repeating previous steps.
+    - Use the information you've already gathered to inform your next steps without re-fetching the same data.
+
+ * Avoid Redundant Actions
+    - Do not request to view code that has already been provided unless necessary.
+    - Reference previously viewed code in your reasoning and planning.
+
+# Additional Notes
+
+ * Think Step by Step
+    - Always use the scratch_pad to document your reasoning and thought process.
+    - Build upon previous steps without unnecessary repetition.
+
+ * Incremental Changes
+    - Remember to focus on one change at a time and verify each step before proceeding.
+
+ * Never Guess
+    - Do not guess line numbers or code content. Use ViewCode to examine code when needed.
+
+"""
+)
+
+REACT_SYSTEM_PROMPT = (
+    AGENT_ROLE
+    + """
+# Workflow Overview
+You will interact with an AI agent with limited programming capabilities, so it's crucial to include all necessary information for successful implementation.
+
+1. **Understand the Task**
+   * **Review the Task:** Carefully read the task provided in <task>.
+   * **Identify Code to Change:** Analyze the task to determine which parts of the codebase need to be changed.
+   * **Identify Necessary Context:** Determine what additional parts of the codebase are needed to understand how to implement the changes. Consider dependencies, related components, and any code that interacts with the affected areas.
+
+2. **Locate Relevant Code**
+   * **Search for Code:** Use the search functions to find relevant code if it's not in the current context:
+       * FindClass
+       * FindFunction
+       * FindCodeSnippet
+       * SemanticSearch
+   * **View Code:** Use ViewCode to examine necessary code spans.
+
+3. **Locate Relevant Tests**
+   * **Locate Existing Tests Related to the Code Changes:** Use existing search functions with the category parameter set to 'test' to find relevant test code.
+
+4. **Apply Code Changes**
+   * **One Step at a Time:** You can only plan and implement one code change at a time.
+   * **Choose the Appropriate Action:**
+       * Use StringReplace to edit existing files
+       * Use CreateFile to create new files
+   * **Tests Run Automatically:** Tests will run automatically after each code change.
+
+5. **Modify or Add Tests**
+   * **Ensure Test Coverage:** After code changes, use the same actions to update or add tests to verify the changes.
+   * **Tests Run Automatically:** Tests will run automatically after test modifications.
+
+6. **Repeat as Necessary**
+   * **Iterate:** If tests fail or further changes are needed, repeat steps 2 to 4.
+
+7. **Finish the Task**
+   * **Completion:** When confident that all changes are correct and the task is resolved, use Finish.
+ +# Important Guidelines + * **Focus on the Specific Task** + - Implement requirements exactly as specified, without additional changes. + - Do not modify code unrelated to the task. + + * **One Action at a Time** + - You must perform only ONE action before waiting for the result. + - Do not include multiple Thought-Action-Observation blocks in a single response. + - Only include one Thought, one Action, and one Action Input per response. + - Do not plan multiple steps ahead in a single response. + + * **Wait for the Observation** + - After performing an action, wait for the observation (result) before deciding on the next action. + - Do not plan subsequent actions until you have received the observation from the current action. + + * **Code Context and Changes** + - Limit code changes to files in the code you can see. + - If you need to examine more code, use ViewCode to see it. + + * **Testing** + - Tests run automatically after each code change. + - Always update or add tests to verify your changes. + - If tests fail, analyze the output and do necessary corrections. + + * **Task Completion** + - Finish the task only when the task is fully resolved and verified. + - Do not suggest code reviews or additional changes beyond the scope. + + * **State Management** + - Keep a detailed record of all code sections you have viewed and actions you have taken. + - Before performing a new action, check your history to ensure you are not repeating previous steps. + - Use the information you've already gathered to inform your next steps without re-fetching the same data. + + * **Avoid Redundant Actions** + - Do not request to view code that has already been provided unless necessary. + - Reference previously viewed code in your reasoning and planning. + +# Additional Notes + + * **Think Step by Step** + - Always document your reasoning and thought process in the Thought section. + - Build upon previous steps without unnecessary repetition. + + * **Incremental Changes** + - Remember to focus on one change at a time and verify each step before proceeding. + + * **Never Guess** + - Do not guess line numbers or code content. Use ViewCode to examine code when needed. 
+""" +) + +# InsertLines diff --git a/moatless/benchmark/claude_evaluation.py b/moatless/benchmark/claude_evaluation.py deleted file mode 100644 index 6b44915f..00000000 --- a/moatless/benchmark/claude_evaluation.py +++ /dev/null @@ -1,360 +0,0 @@ -import json -import logging -from typing import Optional - -import instructor - -from moatless.transition_rules import TransitionRules -from moatless.benchmark.evaluation import create_evaluation_name, Evaluation -from moatless.edit.edit import EditCode -from moatless.edit.plan import PlanToCode -from moatless.find.decide import DecideRelevance -from moatless.find.identify import IdentifyCode -from moatless.find.search import SearchCode -from moatless.transition_rules import TransitionRule -from moatless.state import Finished, Rejected -from moatless.transitions import ( - search_and_code_transitions, - search_transitions, - code_transitions, -) - -# model = "claude-3-5-sonnet-20240620" - -# model = "gpt-4o-2024-05-13" -model = "azure/gpt-4o" - -# model = "openrouter/anthropic/claude-3.5-sonnet" - -global_params = { - "model": model, - "temperature": 0.2, - "max_tokens": 2000, - "max_prompt_file_tokens": 8000, -} - -state_params = { - SearchCode: { - "provide_initial_context": True, - "max_search_results": 75, - "initial_context_tokens": 6000, - "initial_search_results": 100, - "initial_context_spans_per_file": 5, - }, - IdentifyCode: {"expand_context": True}, - DecideRelevance: { - "finish_after_relevant_count": 1, - }, - PlanToCode: { - "max_tokens_in_edit_prompt": 750, - "expand_context_with_related_spans": False, - "finish_on_review": True, - }, - EditCode: { - "chain_of_thought": False, - "show_file_context": False, - "max_prompt_file_tokens": 8000, - }, -} - -index_store_dir = f"/home/albert/20240522-voyage-code-2" -repo_base_dir = "/tmp/repos" -evaluations_dir = "/home/albert/repos/albert/moatless/evaluations" - -search_and_code = search_and_code_transitions( - global_params=global_params, state_params=state_params -) - -identified_spans_but_failed_implementation = [ - "django__django-11583", - "django__django-11179", - "django__django-12286", - "django__django-12700", - "django__django-12708", - "django__django-13315", - "django__django-13933", - "django__django-14382", - "django__django-14608", - "django__django-14787", - "django__django-14999", - "django__django-15347", - "django__django-15789", - "django__django-16041", - "django__django-16046", - "django__django-16595", - "matplotlib__matplotlib-26020", - "matplotlib__matplotlib-24149", - "mwaskom__seaborn-3190", - "psf__requests-3362", - "pytest-dev__pytest-5692", - "scikit-learn__scikit-learn-11281", - "django__django-2708", - "scikit-learn__scikit-learn-13241", - "scikit-learn__scikit-learn-13779", - "scikit-learn__scikit-learn-14894", - "scikit-learn__scikit-learn-15535", - "scikit-learn__scikit-learn-25570", - "sympy__sympy-18621", - "sympy__sympy-23117", - "sympy__sympy-22714", - "sympy__sympy-24213", -] - -coding_test_set = [ - "django__django-11848", - "django__django-12308", - "django__django-12497", - "django__django-13551", - "django__django-13660", - "django__django-14238", - "django__django-14411", - "django__django-14787", - "django__django-16041", - "django__django-17051", - "matplotlib__matplotlib-24149", - "mwaskom__seaborn-3190", - "psf__requests-1963", - "pylint-dev__pylint-6506", - "pylint-dev__pylint-7993", - "pytest-dev__pytest-7432", - "scikit-learn__scikit-learn-13142", - "scikit-learn__scikit-learn-25570", - "sphinx-doc__sphinx-7975", - "sympy__sympy-12481", 
- "sympy__sympy-14396", - "sympy__sympy-14817", - "sympy__sympy-15609", - "sympy__sympy-16988", - "sympy__sympy-18189", - "sympy__sympy-18532", - "sympy__sympy-21847", - "sympy__sympy-22005", - "sympy__sympy-22714", - "sympy__sympy-24066", -] - -search_and_identify_set = [ - "matplotlib__matplotlib-25442", - "matplotlib__matplotlib-23562", - "pytest-dev__pytest-11148", - "sphinx-doc__sphinx-8721", - "sphinx-doc__sphinx-10325", - "scikit-learn__scikit-learn-15535", - "scikit-learn__scikit-learn-11281", - "astropy__astropy-6938", - "sympy__sympy-17022", - "sympy__sympy-17139", - "sympy__sympy-13031", - "django__django-15814", - "django__django-15498", - "django__django-12125", - "django__django-13964", - "django__django-11964", - "django__django-14580", - "django__django-17087", -] - - -def run_evaluation(): - max_file_context_lines = 1000 - - transitions = search_and_code_transitions( - state_params={ - PlanToCode: { - "max_prompt_file_tokens": 16000, - "max_tokens_in_edit_prompt": 500, - "max_file_context_lines": max_file_context_lines, - } - }, - ) - - -def evaluate_search(): - transitions = TransitionRules( - global_params=global_params, - state_params={ - SearchCode: {"max_search_results": 50, "provide_initial_context": True}, - }, - initial_state=SearchCode, - transitions=[ - TransitionRule(source=SearchCode, dest=Finished, trigger="did_search"), - TransitionRule(source=SearchCode, dest=Finished, trigger="finish"), - ], - ) - - evaluation_name = create_evaluation_name(model, "search") - - evaluation = Evaluation( - transitions=transitions, - evaluations_dir=evaluations_dir + "/search", - evaluation_name=evaluation_name, - index_store_dir=index_store_dir, - repo_base_dir=repo_base_dir, - max_file_context_tokens=16000, - litellm_callback="langfuse", - detailed_report=True, - ) - - evaluation.run_evaluation_with_moatless_dataset(use_test_subset=True) - - -def evaluate_search_and_identify( - resolved_by: Optional[int] = 4, - previous_trajectory_dir: Optional[str] = None, - instance_ids: Optional[list] = None, -): - transitions = search_transitions( - global_params=global_params, - state_params=state_params, - ) - - evaluation_name = create_evaluation_name("search_and_identify_3", model) - # evaluation_name = "20240624_search_and_identify_claude-3-5-sonnet-20240620" - - evaluation = Evaluation( - transitions=transitions, - evaluations_dir=evaluations_dir + "/search_and_identify", - evaluation_name=evaluation_name, - index_store_dir=index_store_dir, - repo_base_dir=repo_base_dir, - previous_trajectory_dir=previous_trajectory_dir, - max_file_context_tokens=16000, - litellm_callback="langfuse", - detailed_report=True, - ) - - evaluation.run_evaluation_with_moatless_dataset( - resolved_by=resolved_by, instance_ids=instance_ids - ) - - -def evaluate_search_and_code( - resolved_by: Optional[int], - previous_trajectory_dir: Optional[str] = None, - retry_state: Optional[str] = None, - instance_ids: Optional[list] = None, -): - evaluation_name = create_evaluation_name("search_and_code", model) - # evaluation_name = "20240624_search_and_code_2_claude-3-5-sonnet-20240620" - # evaluation_name = "20240623_moatless_claude-3.5-sonnet" - - evaluation = Evaluation( - transitions=search_and_code, - evaluations_dir=evaluations_dir + "/search_and_code", - evaluation_name=evaluation_name, - index_store_dir=index_store_dir, - repo_base_dir=repo_base_dir, - previous_trajectory_dir=previous_trajectory_dir, - retry_state=retry_state, - max_file_context_tokens=16000, - num_workers=3, - litellm_callback="langfuse", - 
detailed_report=True, - ) - - evaluation.run_evaluation_with_moatless_dataset( - resolved_by=resolved_by, - instance_ids=instance_ids, - ) - - -def evaluate_coding(): - evaluation_name = create_evaluation_name("coding", model) - # evaluation_name = "20240623_coding_2_claude-3.5-sonnet" - - evaluation = Evaluation( - transitions=code_transitions( - global_params=global_params, state_params=state_params - ), - use_expected_file_context=True, - evaluations_dir=evaluations_dir + "/coding", - evaluation_name=evaluation_name, - index_store_dir=index_store_dir, - repo_base_dir=repo_base_dir, - max_file_context_tokens=16000, - litellm_callback="langfuse", - detailed_report=True, - ) - - df = evaluation.run_evaluation_with_moatless_dataset(instance_ids=coding_test_set) - - -def evaluate_plan(previous_trajectory_dir: Optional[str] = None): - transitions = TransitionRules( - global_params=global_params, - state_params={ - SearchCode: { - "provide_initial_context": True, - "max_search_results": 75, - "initial_context_tokens": 6000, - "initial_search_results": 100, - "initial_context_spans_per_file": 5, - }, - PlanToCode: { - "max_prompt_file_tokens": 16000, - "max_tokens_in_edit_prompt": 750, - "expand_context_with_related_spans": False, - }, - }, - initial_state=SearchCode, - transitions=[ - TransitionRule(source=SearchCode, dest=IdentifyCode, trigger="did_search"), - TransitionRule(source=IdentifyCode, dest=SearchCode, trigger="search"), - TransitionRule(source=IdentifyCode, dest=DecideRelevance, trigger="finish"), - TransitionRule(source=DecideRelevance, dest=SearchCode, trigger="search"), - TransitionRule( - source=DecideRelevance, - dest=PlanToCode, - trigger="finish", - exclude_fields={"message"}, - ), - TransitionRule(source=PlanToCode, dest=Finished, trigger="edit_code"), - TransitionRule(source=PlanToCode, dest=Rejected, trigger="finish"), - TransitionRule(source=PlanToCode, dest=Rejected, trigger="reject"), - ], - ) - - evaluation_name = create_evaluation_name("search_and_plan_2", model) - - evaluation = Evaluation( - transitions=transitions, - evaluations_dir=evaluations_dir + "/search_and_plan", - evaluation_name=evaluation_name, - index_store_dir=index_store_dir, - repo_base_dir=repo_base_dir, - previous_trajectory_dir=previous_trajectory_dir, - retry_state="PlanToCode", - max_file_context_tokens=16000, - litellm_callback="langfuse", - detailed_report=True, - ) - - df = evaluation.run_evaluation_with_moatless_dataset( - instance_ids=identified_spans_but_failed_implementation - ) - - # print out instance id and if planned - for instance_id in df.index: - print(df.loc[instance_id, "instance_id"], df.loc[instance_id, "planned"]) - - -if __name__ == "__main__": - logging.basicConfig( - format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", - level=logging.INFO, - ) - logging.getLogger().setLevel(logging.INFO) - logging.getLogger("LiteLLM").setLevel(logging.WARNING) - logging.getLogger("Evaluator").setLevel(logging.INFO) - - # evaluate_coding() - # evaluate_search_and_identify() - evaluate_search_and_code( - 1, - "/home/albert/repos/albert/moatless/evaluations/20240623_moatless_claude-3.5-sonnet/trajs", - retry_state="PlanToCode", - ) - # evaluate_search_and_code() - # evaluate_search_and_code( - # # "/home/albert/repos/albert/moatless/evaluations/search_and_code/20240622_search_and_code_6_claude-3.5-sonnet/trajs" - # ) diff --git a/moatless/benchmark/create_dataset.py b/moatless/benchmark/create_dataset.py deleted file mode 100644 index fb294f29..00000000 --- 
a/moatless/benchmark/create_dataset.py +++ /dev/null @@ -1,155 +0,0 @@ -import json - -import pandas as pd - -from moatless.benchmark.swebench import setup_swebench_repo, sorted_instances -from moatless.benchmark.utils import get_file_spans_from_patch -from moatless.repository import FileRepository - -experiments_runs = [ - "20240402_sweagent_claude3opus", - "20240402_sweagent_gpt4", - "20240509_amazon-q-developer-agent-20240430-dev", - "20240523_aider", - "20240524_opencsg_starship_gpt4", - "20240530_autocoderover-v20240408", - "20240604_CodeR", - "20240612_IBM_Research_Agent101", - "20240612_marscode-agent-dev", - "20240612_MASAI_gpt4o", - "20240615_appmap-navie_gpt4o", - "20240617_factory_code_droid", - "20240617_moatless_gpt4o", -] - -dataset_path = ( - "/home/albert/repos/albert/moatless/datasets/swebench_lite_all_evaluations.json" -) - - -def read_predictions(pred_path: str): - predictions = {} - with open(pred_path) as f: - for line in f.readlines(): - prediction = json.loads(line) - predictions[prediction["instance_id"]] = prediction["model_patch"] - return predictions - - -def generate_report(): - results = {} - - experiments_dir = "/home/albert/repos/stuffs/experiments/evaluation/lite" - - runs = [] - for run_name in experiments_runs: - runs.append( - ( - run_name, - f"{experiments_dir}/{run_name}/all_preds.jsonl", - f"{experiments_dir}/{run_name}/results/results.json", - ) - ) - - runs.append( - ( - "autocoderover_v20240620", - "/home/albert/repos/stuffs/acr-experiments/evaluation/lite/20240621_autocoderover-v20240620/all_preds.jsonl", - "/home/albert/repos/stuffs/acr-experiments/evaluation/lite/20240621_autocoderover-v20240620/results.json", - ) - ) - - runs.append( - ( - "20240622_Lingma_Agent", - "/home/albert/repos/stuffs/alibaba-experiments/evaluation/lite/20240622_Lingma_Agent/all_preds.jsonl", - "/home/albert/repos/stuffs/alibaba-experiments/evaluation/lite/20240622_Lingma_Agent/results.json", - ) - ) - - for run_name, prediction_file, result_file in runs: - with open(result_file) as file: - final_report = json.load(file) - - resolved_tasks = final_report["resolved"] - predictions_by_id = read_predictions(prediction_file) - - results[run_name] = { - "resolved_tasks": resolved_tasks, - "predictions": predictions_by_id, - } - - evaluation_dataset = [] - - report = [] - - instances = sorted_instances( - split="test", dataset_name="princeton-nlp/SWE-bench_Lite" - ) - for instance in instances: - instance_id = instance["instance_id"] - expected_patch = instance["patch"] - repo_dir = setup_swebench_repo(instance, repo_base_dir="/tmp/repos_2") - file_repo = FileRepository(repo_dir) - - expected_file_spans = get_file_spans_from_patch(file_repo, expected_patch) - - evaluation_instance = { - "instance_id": instance_id, - "repo": instance["repo"], - "base_commit": instance["base_commit"], - "problem_statement": instance["problem_statement"], - "golden_patch": instance["patch"], - "expected_spans": expected_file_spans, - "resolved_by": [], - "alternative_spans": [], - } - - for run_name, _, _ in runs: - prediction = results[run_name]["predictions"].get(instance_id) - - if instance_id not in results[run_name]["resolved_tasks"]: - continue - - file_spans = get_file_spans_from_patch(file_repo, prediction) - - is_different = False - alternative_spans = {} - for file_path, span_ids in file_spans.items(): - if file_path in expected_file_spans: - alternative_spans[file_path] = span_ids - - if set(expected_file_spans[file_path]).difference(set(span_ids)): - is_different = True - - if 
is_different: - evaluation_instance["alternative_spans"].append( - {"run_name": run_name, "spans": alternative_spans} - ) - - resolved = { - "name": run_name, - "patch": prediction, - "updated_spans": file_spans, - "alternative_spans": alternative_spans, - } - - evaluation_instance["resolved_by"].append(resolved) - - report.append( - { - "instance_id": instance_id, - "resolved_by": len(evaluation_instance["resolved_by"]), - } - ) - - evaluation_dataset.append(evaluation_instance) - - with open(dataset_path, "w") as f: - json.dump(evaluation_dataset, f, indent=2) - - return pd.DataFrame(report) - - -if __name__ == "__main__": - df = generate_report() diff --git a/moatless/benchmark/evaluation.py b/moatless/benchmark/evaluation.py deleted file mode 100644 index 1d0b9980..00000000 --- a/moatless/benchmark/evaluation.py +++ /dev/null @@ -1,517 +0,0 @@ -import concurrent.futures -import json -import logging -import os -import subprocess -import time -import traceback -from collections import defaultdict -from datetime import datetime, timezone -from typing import Optional, Tuple - -import instructor -import litellm -import pandas as pd -from tqdm.auto import tqdm - -from moatless.benchmark.report_v2 import to_result, generate_md_report -from moatless.trajectory import Trajectory -from moatless.transition_rules import TransitionRules -from moatless.benchmark.swebench import ( - found_in_alternative_spans, - found_in_expected_spans, - get_repo_dir_name, - load_instance, - setup_swebench_repo, - sorted_instances, -) -from moatless.benchmark.utils import ( - get_missing_files, - trace_metadata, -) -from moatless.file_context import FileContext -from moatless.loop import AgenticLoop -from moatless.repository import FileRepository, GitRepository -from moatless.workspace import Workspace - -logger = logging.getLogger(__name__) - -TEST_SUBSET = [ - "astropy__astropy-14995", - "django__django-10914", - "django__django-11039", - "django__django-11179", - "django__django-12286", - "django__django-12453", - "django__django-12983", - "django__django-13230", - "django__django-13710", - "django__django-13757", - "django__django-14915", - "django__django-14999", - "django__django-15789", - "matplotlib__matplotlib-23913", - "matplotlib__matplotlib-23964", - "pydata__xarray-5131", - "pytest-dev__pytest-11143", - "pytest-dev__pytest-5692", - "pytest-dev__pytest-7373", - "scikit-learn__scikit-learn-13142", - "scikit-learn__scikit-learn-13241", - "scikit-learn__scikit-learn-13439", - "scikit-learn__scikit-learn-13496", - "scikit-learn__scikit-learn-13779", - "scikit-learn__scikit-learn-14894", - "scikit-learn__scikit-learn-25570", - "sympy__sympy-13480", - "sympy__sympy-13647", - "sympy__sympy-20212", - "sympy__sympy-24213", -] - - -class Evaluation: - def __init__( - self, - index_store_dir: str, - repo_base_dir: str, - evaluations_dir: str, - evaluation_name: str, - transitions: TransitionRules, - instructor_mode: instructor.Mode | None = None, - max_cost: float = 0.5, - max_transitions: int = 25, - max_expansions: int = 2, - max_file_context_tokens: int = 16000, - markdown_report: bool = False, - litellm_callback: Optional[str] = None, - previous_trajectory_dir: Optional[str] = None, - retry_state: Optional[str] = None, - num_workers: int = 1, - detailed_report: bool = False, - ): - self.index_store_dir = index_store_dir - self.repo_base_dir = repo_base_dir - self.evaluations_dir = evaluations_dir - self.num_workers = num_workers - self.detailed_report = detailed_report - self.markdown_report = markdown_report - 
- self.evaluation_name = evaluation_name - self.max_file_context_tokens = max_file_context_tokens - self.max_cost = max_cost - self.max_expansions = max_expansions - self.max_transitions = max_transitions - self.instructor_mode = instructor_mode - - self.transitions = transitions - - litellm.drop_params = True - - self.evaluation_dir = f"{evaluations_dir}/{evaluation_name}" - self.trajectory_dir = f"{self.evaluations_dir}/{evaluation_name}/trajs" - self.logs_dir = f"{self.evaluations_dir}/{evaluation_name}/prompt_logs" - self.predictions_path = f"{self.evaluation_dir}/all_preds.jsonl" - - self.previous_trajectory_dir = previous_trajectory_dir - self.retry_state = retry_state - - logger.info(f"Save trajectories to directory: {self.trajectory_dir}") - if not os.path.exists(self.trajectory_dir): - os.makedirs(self.trajectory_dir) - - logger.info(f"Save logs to directory: {self.logs_dir}") - if not os.path.exists(self.logs_dir): - os.makedirs(self.logs_dir) - - if litellm_callback: - litellm.success_callback = [litellm_callback] - litellm.failure_callback = [litellm_callback] - - # This is only to set instances as resolved after all evaluations have been run to generate the report - # TODO: Run swe-bench-docker after the prediction is generated - result_file = f"{self.evaluation_dir}/result.json" - if os.path.exists(result_file): - with open(os.path.join(result_file)) as f: - self.report = json.load(f) - else: - self.report = {"resolved_ids": []} - - def run_evaluation_with_moatless_dataset( - self, - resolved_by: Optional[int] = None, - use_test_subset: bool = False, - instance_ids: list[str] | None = None, - ): - file_path = os.path.join( - os.path.dirname(__file__), "swebench_lite_all_evaluations.json" - ) - with open(file_path) as f: - instances = json.load(f) - - instances = sorted(instances, key=lambda x: len(x["resolved_by"]), reverse=True) - - if use_test_subset: - instances = [ - instance - for instance in instances - if instance["instance_id"] in TEST_SUBSET - ] - - if instance_ids: - instances = [ - instance - for instance in instances - if instance["instance_id"] in instance_ids - ] - - if resolved_by: - instances = [ - instance - for instance in instances - if len(instance["resolved_by"]) >= resolved_by - ] - - return self._run_evaluation(instances) - - def run_swebench_evaluation( - self, - dataset: str = "princeton-nlp/SWE-bench_Lite", - split="test", - instance_ids: list[str] | None = None, - ): - instances = sorted_instances(dataset, split) - - if instance_ids: - instances = [ - instance - for instance in instances - if instance["instance_id"] in instance_ids - ] - - return self._run_evaluation_simple(instances) - - def run_single_instance( - self, - instance_id: str, - dataset: str = "princeton-nlp/SWE-bench_Lite", - split="test", - ) -> dict: - instance = load_instance(instance_id, dataset, split) - trajectory = self._evaluate_instance(instance) - return to_result(instance, trajectory, self.report) - - def _evaluate_instance(self, instance: dict, retry: bool = False) -> Trajectory: - instance_id = instance["instance_id"] - trajectory_path = os.path.join(self.trajectory_dir, f"{instance_id}.json") - prompt_log_dir = os.path.join(self.logs_dir, f"{instance_id}") - if not os.path.exists(prompt_log_dir): - os.makedirs(prompt_log_dir) - - if os.path.exists(trajectory_path) and not retry: - # TODO: Retry when failed or not finished? 
- return Trajectory.load(trajectory_path) - - repo_dir = setup_swebench_repo(instance) - persist_dir = os.path.join(self.index_store_dir, get_repo_dir_name(instance_id)) - workspace = Workspace.from_dirs( - repo_path=repo_dir, index_dir=persist_dir, max_file_context_tokens=16000 - ) - - problem_statement = instance["problem_statement"] - - previous_actions = [] - if self.previous_trajectory_dir: - previous_trajectory_path = os.path.join( - self.previous_trajectory_dir, f"{instance_id}.json" - ) - previous_trajectory = self.read_trajectory(previous_trajectory_path) - if previous_trajectory: - previous_actions = self.get_actions(previous_trajectory) - - metadata = trace_metadata( - instance_id=instance_id, - session_id=self.evaluation_name, - trace_name="moatless", - ) - - loop = AgenticLoop( - transition_rules=self.transitions, - workspace=workspace, - metadata=metadata, - mocked_actions=previous_actions, - reset_mocks_at_state=self.retry_state, - trajectory_path=trajectory_path, - prompt_log_dir=prompt_log_dir, - max_cost=self.max_cost, - max_transitions=self.max_transitions, - max_actions=self.max_expansions, - instructor_mode=self.instructor_mode, - ) - - info = { - "evaluation_name": self.evaluation_name, - "instance_id": instance["instance_id"], - } - - start_time = time.time() - try: - response = loop.run(problem_statement) - info["status"] = response.status - except Exception: - info["error"] = traceback.format_exc() - info["status"] = "error" - logging.exception(f"Error in evaluation of {instance['instance_id']} ") - - info["duration"] = time.time() - start_time - info["total_cost"] = loop.total_cost() - - if isinstance(workspace.file_repo, GitRepository): - diff = workspace.file_repo.diff() - else: - workspace.save() - - output = subprocess.run( - ["git", "diff"], - capture_output=True, - text=True, - cwd=repo_dir, - ) - - if output: - diff = output.stdout - else: - diff = None - - info["submission"] = diff - - loop.trajectory.save_info(info) - return loop.trajectory - - def _process_instance(self, instance) -> Tuple[dict, str]: - trajectory = self._evaluate_instance(instance) - - result = to_result(instance, trajectory, self.report) - submission = trajectory.info.get("submission", "") - - if self.markdown_report: - try: - md_report = generate_md_report(trajectory, instance) - if not os.path.exists(f"{self.evaluation_dir}/reports"): - os.makedirs(f"{self.evaluation_dir}/reports") - with open( - f"{self.evaluation_dir}/reports/{instance['instance_id']}.md", - "w", - ) as file: - file.write(md_report) - except Exception: - logging.exception( - f"Error in generating report for {instance['instance_id']} " - ) - - return result, submission - - def _process_repo_group(self, repo, instances): - results = [] - transition_results = [] - for i, instance in enumerate(instances): - logger.info( - f"Processing {instance['instance_id']} ({i+1}/{len(instances)} in {repo})" - ) - - trajectory = self._evaluate_instance(instance) - if not trajectory: - return None, None - - result = to_result(instance, trajectory, report=self.report) - results.append(result) - - try: - md_report = generate_md_report(trajectory, instance) - if not os.path.exists(f"{self.evaluation_dir}/reports"): - os.makedirs(f"{self.evaluation_dir}/reports") - with open( - f"{self.evaluation_dir}/reports/{instance['instance_id']}.md", - "w", - ) as file: - file.write(md_report) - except Exception: - logging.exception( - f"Error in generating report for {instance['instance_id']} " - ) - - prediction = { - "model_name_or_path": 
self.evaluation_name, - "instance_id": result["instance_id"], - "model_patch": trajectory["info"].get("submission", ""), - } - - with open(self.predictions_path, "a") as file: - json_string = json.dumps(prediction) - file.write(json_string + "\n") - - return results, transition_results - - def _run_evaluation(self, instances: list[dict]): - if self.detailed_report or self.num_workers > 1: - self._run_evaluation_detailed(instances) - else: - self._run_evaluation_simple(instances) - - def _run_evaluation_detailed(self, instances: list[dict]): - error = 0 - - with open(self.predictions_path, "w") as file: - file.write("") - - repo_groups = defaultdict(list) - for instance in instances: - repo_groups[instance.get("repo")].append(instance) - - results = [] - transition_results = [] - - logger.info(f"Processing {len(instances)} instances with {len(repo_groups)} repos with {self.num_workers} workers") - - with concurrent.futures.ProcessPoolExecutor( - max_workers=self.num_workers - ) as executor: - futures = [] - for repo, group in repo_groups.items(): - futures.append(executor.submit(self._process_repo_group, repo, group)) - - pbar = tqdm(concurrent.futures.as_completed(futures), total=len(futures)) - - for future in pbar: - try: - group_results, group_transition_results = future.result() - if not group_results: - logger.warning("Error in processing repo group") - error += 1 - continue - except Exception: - error += 1 - logger.exception("Error in processing repo group") - continue - - results.extend(group_results) - transition_results.extend(group_transition_results) - - df = pd.DataFrame(results) - df.to_csv( - f"{self.evaluation_dir}/result.csv", - index=False, - sep=",", - decimal=",", - quoting=1, - ) - - avg_duration = df["duration"].mean() - avg_cost = df["total_cost"].mean() - total_identified = df["identified"].sum() - total_processed = len(df) - - logger.info(f"Average duration: {avg_duration:.2f} seconds") - logger.info(f"Average cost: ${avg_cost:.4f}") - logger.info(f"Total identified: {total_identified}") - logger.info(f"Total processed: {total_processed}") - logger.info(f"Error count: {error}") - - if transition_results: - df_search = pd.DataFrame(transition_results) - df_search.to_csv( - f"{self.evaluation_dir}/transition_results.csv", - index=False, - sep=",", - decimal=",", - quoting=1, - ) - - def _run_evaluation_simple(self, instances: list[dict]): - with open(self.predictions_path, "w") as file: - file.write("") - - count = 0 - identified = 0 - generated = 0 - error = 0 - - sum_duration = 0 - sum_total_cost = 0 - - stats = {} - pbar = tqdm(instances) - for instance in pbar: - trajectory = self._evaluate_instance(instance) - if not trajectory: - continue - - result, transition_result = to_result(instance, trajectory, report=self.report) - - sum_duration += result["duration"] - sum_total_cost += result["total_cost"] - - if result["status"] == "error": - error += 1 - - if result["status"] in ["generated", "failed", "resolved"]: - generated += 1 - - if result["identified"] is not None: - identified += 1 - - count += 1 - - if sum_duration > 0: - stats["avg_duration"] = sum_duration / count - - if sum_total_cost > 0: - stats["avg_cost"] = sum_total_cost / count - stats["total_cost"] = sum_total_cost - - if identified > 0: - success_rate = (identified / count) * 100 - stats["identified"] = f"{success_rate:.2f}%" - - if generated > 0: - success_rate = (generated / count) * 100 - stats["generated"] = f"{success_rate:.2f}%" - - stats["error"] = error - - pbar.set_postfix(stats) - - 
prediction = { - "model_name_or_path": self.evaluation_name, - "instance_id": instance["instance_id"], - "model_patch": trajectory["info"].get("submission", ""), - } - - with open(self.predictions_path, "a") as file: - json_string = json.dumps(prediction) - file.write(json_string + "\n") - - - def read_trajectory(self, path) -> Optional[dict]: - if os.path.exists(path): - with open(path) as f: - return json.load(f) - else: - return None - - def get_actions(self, trajectory: dict): - actions = [] - for transition in trajectory["transitions"]: - for action in transition["actions"]: - actions.append(action) - return actions - - -def create_evaluation_name( - name: str, - model: str, -): - date_str = datetime.now(tz=timezone.utc).strftime("%Y%m%d") - model_name = model.split("/")[-1] - return f"{date_str}_{name}_{model_name}" - diff --git a/moatless/benchmark/loop_evaluation.py b/moatless/benchmark/loop_evaluation.py new file mode 100644 index 00000000..7faa2e36 --- /dev/null +++ b/moatless/benchmark/loop_evaluation.py @@ -0,0 +1,467 @@ +import concurrent.futures +import gc +import json +import logging +import os +import random +import shutil +import threading +import time +import traceback +from collections import defaultdict +from datetime import datetime, timezone +from typing import Optional, Any + +import litellm +from pydantic import BaseModel, Field +from tqdm.auto import tqdm + +from moatless.agent.agent import ActionAgent, MessageHistoryType +from moatless.agent.code_agent import CodingAgent + +from moatless.benchmark.swebench import ( + create_repository, + create_index, +) +from moatless.benchmark.utils import get_moatless_instance +from moatless.completion.completion import CompletionModel +from moatless.completion.log_handler import LogHandler +from moatless.loop import AgenticLoop + +logger = logging.getLogger(__name__) + + +class Evaluation: + def __init__( + self, + evaluations_dir: str, + evaluation_name: str, + dataset_name: str = "princeton-nlp/SWE-bench_Lite", + repo_base_dir: str | None = None, + report_mode: str | None = None, + num_workers: int = 1, + use_testbed: bool = False, + completion_model: CompletionModel | None = None, + agent: ActionAgent | None = None, + max_iterations: int = 30, + max_cost: float = 1.0, + evaluate_results: bool = False + ): + if not completion_model and not agent: + raise RuntimeError("Either completion_model or agent must be provided") + + self.evaluations_dir = evaluations_dir + self.num_workers = num_workers + self.report_mode = report_mode + self.dataset_name = dataset_name + self.evaluation_name = evaluation_name + self.evaluate_results = evaluate_results + + self.use_testbed = use_testbed + + self.max_iterations = max_iterations + self.max_cost = max_cost + + self.agent = agent + self.completion_model = completion_model + + self.evaluation_dir = f"{evaluations_dir}/{evaluation_name}" + logger.info(f"Evaluation directory: {self.evaluation_dir}") + if not os.path.exists(self.evaluation_dir): + os.makedirs(self.evaluation_dir) + + self.predictions_path = f"{self.evaluation_dir}/all_preds.jsonl" + + self.repo_base_dir = repo_base_dir or os.getenv("REPO_DIR", "/tmp/repos") + + completion_log_dir = f"{self.evaluation_dir}/completion_logs" + if not os.path.exists(completion_log_dir): + os.makedirs(completion_log_dir) + litellm.callbacks = [LogHandler(completion_log_dir)] + + self.status_file = f"{self.evaluation_dir}/status_summary.json" + self.event_file = f"{self.evaluation_dir}/event_log.json" + self.file_lock = threading.Lock() + 
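+        # In-memory per-instance status and event logs; every update is flushed to
+        # status_summary.json and event_log.json in the evaluation directory, guarded
+        # by file_lock since instances are evaluated from multiple worker threads.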
self.statuses = defaultdict(dict)
+        self.events = defaultdict(list)
+
+    def update_status(self, instance_id: str, status: str):
+        with self.file_lock:
+            if instance_id not in self.statuses:
+                self.statuses[instance_id] = {
+                    "created": datetime.now().isoformat(),
+                }
+
+            self.statuses[instance_id].update(
+                {"last_updated": datetime.now().isoformat(), "status": status}
+            )
+            self._save_statuses()
+
+    def log_event(self, instance_id: str, event: str):
+        with self.file_lock:
+            self.events[instance_id].append(
+                {"timestamp": datetime.now().isoformat(), "event": event}
+            )
+            self._save_events()
+
+    def _save_statuses(self):
+        with open(self.status_file, "w") as f:
+            json.dump(self.statuses, f, indent=2)
+
+    def _save_events(self):
+        with open(self.event_file, "w") as f:
+            json.dump(self.events, f, indent=2)
+
+    def run_evaluation(
+        self,
+        split: str = "lite",
+        instance_ids: list[str] | None = None,
+        exclude_instance_ids: list[str] | None = None,
+        repos: list[str] | None = None,
+        ignore_repos: list[str] | None = None,
+        min_resolved: Optional[int] = None,
+        max_resolved: Optional[int] = None,
+    ):
+        file_path = os.path.join(
+            os.path.dirname(__file__), f"swebench_{split}_all_evaluations.json"
+        )
+        with open(file_path) as f:
+            instances = json.load(f)
+
+        random.shuffle(instances)
+
+        logger.info(f"Loaded {len(instances)} instances from {file_path}")
+
+        if instance_ids:
+            instances = [
+                instance
+                for instance in instances
+                if instance["instance_id"] in instance_ids
+            ]
+
+            logger.info(
+                f"Running evaluation for {len(instances)} instances filtered by instance_ids"
+            )
+
+        if exclude_instance_ids:
+            instances = [
+                instance
+                for instance in instances
+                if instance["instance_id"] not in exclude_instance_ids
+            ]
+
+            logger.info(
+                f"Running evaluation for {len(instances)} instances filtered by exclude_instance_ids"
+            )
+
+        if min_resolved is not None:
+            instances = [
+                instance
+                for instance in instances
+                if len(instance["resolved_by"]) >= min_resolved
+                or (
+                    min_resolved == 1
+                    and instance.get("llm_monkeys", {}).get("resolved_rate", 0) > 0
+                )
+            ]
+
+            logger.info(
+                f"Running evaluation for {len(instances)} instances filtered by min_resolved >= {min_resolved}"
+            )
+
+        if max_resolved is not None:
+            instances = [
+                instance
+                for instance in instances
+                if len(instance["resolved_by"]) <= max_resolved
+            ]
+
+            logger.info(
+                f"Running evaluation for {len(instances)} instances filtered by max_resolved <= {max_resolved}"
+            )
+
+        if repos:
+            instances = [
+                instance for instance in instances if instance["repo"] in repos
+            ]
+
+            logger.info(
+                f"Running evaluation for {len(instances)} instances filtered by repos"
+            )
+
+        if ignore_repos:
+            instances = [
+                instance
+                for instance in instances
+                if instance["repo"] not in ignore_repos
+            ]
+
+            if instances:
+                logger.info(
+                    f"Running evaluation for {len(instances)} instances after filtering by ignore_repos"
+                )
+
+        return self._run_evaluation(instances)
+
+    def evaluate_instance(self, instance: dict):
+        instance_id = instance["instance_id"]
+        instance_dir = os.path.join(self.evaluation_dir, f"{instance_id}")
+        trajectory_path = os.path.join(instance_dir, "trajectory.json")
+
+        if not os.path.exists(instance_dir):
+            os.makedirs(instance_dir)
+
+        log_dir = os.path.join(instance_dir, "logs")
+        if not os.path.exists(log_dir):
+            os.makedirs(log_dir)
+
+        eval_result_path = os.path.join(instance_dir, "eval_result.json")
+        if os.path.exists(eval_result_path):
+            try:
+                with open(eval_result_path) as f:
+                    eval_result = json.load(f)
+
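+            # If the persisted eval_result.json is corrupt or partially written, the
+            # handler below discards it so the instance is evaluated from scratch.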
except json.JSONDecodeError as e: + logger.error( + f"Failed to parse eval result from {eval_result_path}. Will remove file to start over. Error: {e}" + ) + os.remove(eval_result_path) + eval_result = { + "node_results": {}, + } + else: + eval_result = { + "node_results": {}, + } + + logger.info(f"Evaluating {instance_id}") + problem_statement = f"\n{instance['problem_statement']}\n" + + runtime = None + repository = None + + self.update_status(instance_id, "started") + self.log_event(instance_id, "evaluate_instance_initiated") + + try: + loop = None + + if os.path.exists(trajectory_path): + try: + persisted_loop = AgenticLoop.from_file(trajectory_path) + if persisted_loop.is_finished(): + logger.info(f"Found completed trajectory for {instance_id}") + loop = persisted_loop + except json.JSONDecodeError as e: + logger.error( + f"Failed to parse trajectory from {trajectory_path}. Will remove file to start over. Error: {e}" + ) + os.remove(trajectory_path) + + if not loop: + self.log_event(instance_id, "workspace_created") + + metadata: dict[str, Any] = { + "evaluation_name": self.evaluation_name, + "instance_id": instance["instance_id"], + } + + repository = create_repository( + instance, repo_base_dir=self.repo_base_dir + ) + code_index = create_index(instance, repository=repository) + + if self.use_testbed: + from moatless.runtime.testbed import TestbedEnvironment + + runtime = TestbedEnvironment( + repository=repository, + instance=instance, + log_dir=log_dir, + dataset_name=self.dataset_name, + ) + else: + runtime = None + + if os.path.exists(trajectory_path): + loop = AgenticLoop.from_file( + trajectory_path, + repository=repository, + runtime=runtime, + code_index=code_index, + ) + else: + agent = self.agent or CodingAgent.create( + completion_model=self.completion_model, + repository=repository, + code_index=code_index, + runtime=runtime, + ) + + agent_role = f"""You are an autonomous AI assistant and a core member of the development team for the {instance["repo"]} project. As a senior developer on the team, you have deep knowledge of the codebase and best practices.""" + agent.system_prompt = f"{agent_role}\n\n{agent.system_prompt}" + + loop = AgenticLoop.create( + message=problem_statement, + repository=repository, + agent=agent, + max_iterations=self.max_iterations, + max_cost=self.max_cost, + metadata=metadata, + persist_path=trajectory_path, + ) + self.log_event(instance_id, "agent_loop_execution_started") + + if loop and "error" in eval_result: + del eval_result["error"] + with open(eval_result_path, "w") as f: + json.dump(eval_result, f, indent=2) + + loop.run() + self.log_event(instance_id, "agent_loop_execution_completed") + + start_time = time.time() + try: + last_node = loop.get_last_node() + if not last_node: + logger.error(f"No last node found for {instance_id}") + eval_result["status"] = "no_last_node" + return eval_result + else: + patch = last_node.file_context.generate_git_patch() + if not patch: + logger.error(f"No patch generated for {instance_id} and last node {last_node.node_id}. 
File context: {last_node.file_context.model_dump()}") + + eval_result["status"] = "completed" + if not patch: + logger.warning(f"No patch generated for {instance_id}") + eval_result["status"] = "no_patch" + return eval_result + else: + self.save_prediction(instance_id, patch) + + if not self.evaluate_results: + return eval_result + + if "node_results" not in eval_result: + eval_result["node_results"] = {} + + if str(last_node.node_id) in eval_result["node_results"]: + return eval_result + + if self.use_testbed and patch: + + if not runtime: + repository = create_repository( + instance, repo_base_dir=self.repo_base_dir + ) + from testbeds.sdk import TestbedSDK + from moatless.runtime.testbed import TestbedEnvironment + + runtime = TestbedEnvironment( + testbed_sdk=TestbedSDK(), + repository=repository, + instance=instance, + log_dir=log_dir, + enable_cache=True, + ) + + start_time = time.time() + result = runtime.evaluate(patch=patch) + if not result: + logger.error(f"Error in evaluating patch for {instance_id}") + else: + eval_result["node_results"][str(last_node.node_id)] = ( + result.model_dump() + ) + eval_result["status"] = "resolved" if result.resolved else "failed" + + except Exception: + eval_result["error"] = traceback.format_exc() + eval_result["status"] = "error" + logging.exception(f"Error in evaluation of {instance['instance_id']} ") + finally: + eval_result["duration"] = time.time() - start_time + loop.persist(trajectory_path) + + with open(eval_result_path, "w") as f: + json.dump(eval_result, f, indent=2) + self.log_event(instance_id, "evaluation_completed") + self.update_status(instance_id, eval_result["status"]) + + return eval_result + + except Exception: + logger.exception(f"Error in processing instance {instance_id}") + self.log_event(instance_id, "evaluation_error") + self.update_status(instance_id, "error") + return None + + finally: + with open(eval_result_path, "w") as f: + json.dump(eval_result, f, indent=2) + + # Clean up + if repository: + shutil.rmtree(repository.repo_dir, ignore_errors=True) + + del runtime + del repository + del loop + gc.collect() + + def save_prediction(self, instance_id, submission): + with self.file_lock: + prediction = { + "model_name_or_path": self.evaluation_name, + "instance_id": instance_id, + "model_patch": submission, + } + with open(self.predictions_path, "a") as file: + json_string = json.dumps(prediction) + file.write(json_string + "\n") + + def _run_evaluation(self, instances: list[dict]): + error = 0 + + with open(self.predictions_path, "w") as file: + file.write("") + + results = [] + + logger.info( + f"Processing {len(instances)} instances with {self.num_workers} workers" + ) + + with concurrent.futures.ThreadPoolExecutor( + max_workers=self.num_workers + ) as executor: + futures = [ + executor.submit(self.evaluate_instance, instance) + for instance in instances + ] + + pbar = tqdm(concurrent.futures.as_completed(futures), total=len(futures)) + + for future in pbar: + try: + result = future.result() + except Exception: + error += 1 + logger.exception("Error in processing instance") + + logger.info(f"Completed processing with {error} errors") + self.update_status("all", "evaluation_completed") + + def read_trajectory(self, path) -> dict | None: + if os.path.exists(path): + with open(path) as f: + return json.load(f) + else: + return None + + def get_actions(self, trajectory: dict): + actions = [] + for transition in trajectory["transitions"]: + for action in transition["actions"]: + actions.append(action["action"]) + return 
actions diff --git a/moatless/benchmark/report_v1.py b/moatless/benchmark/report_v1.py deleted file mode 100644 index 65a5a403..00000000 --- a/moatless/benchmark/report_v1.py +++ /dev/null @@ -1,418 +0,0 @@ -import json -import logging -import os - -from moatless import FileRepository -from moatless.benchmark.swebench import found_in_expected_spans, found_in_alternative_spans, setup_swebench_repo -from moatless.benchmark.utils import get_missing_files -from moatless.file_context import FileContext - -logger = logging.getLogger(__name__) - - -def to_result(instance: dict, trajectory: dict, report: dict | None) -> tuple[dict, list]: - """ - Generate reports from saved trajectories with version 1 format. - """ - - info = trajectory["info"] - - resolved = report and info.get("instance_id", "") in report["resolved"] - - try: - transitions = [] - result = { - "instance_id": instance["instance_id"], - "duration": info.get("duration", 0), - "total_cost": info.get("total_cost", 0), - "resolved_by": (len(instance.get("resolved_by", []))), - "status": None, - "transitions": len(trajectory["transitions"]), - "edited": False, - "planned": False, - "identified": None, - "expected_identified": None, - "alt_identified": None, - "found_in_search": None, - "file_identified": None, - "file_in_search": None, - "edit_retries": 0, - "has_diff": False, - "lint_codes": None, - "review": False, - "p_query": 0, - "p_file": 0, - "p_code": 0, - "p_class": 0, - "p_function": 0, - "lints": "", - } - - lint_codes = set() - search_results_spans = {} - identified_spans = {} - planned_spans = {} - edited_spans = {} - - id_iterations = 0 - search_iterations = 0 - - if instance.get("expected_spans"): - for transition in trajectory["transitions"]: - if transition["name"] not in result: - result[transition["name"]] = 0 - result[f"{transition['name']}_cost"] = 0 - - result[transition["name"]] += 1 - - expected_span_str = "" - for file_path, span_ids in instance["expected_spans"].items(): - expected_span_str += f"{file_path}: {span_ids} " - - transition_result = { - "instance_id": instance["instance_id"], - "resolved": resolved, - "name": transition["name"], - "cost": 0, - "expected_spans": expected_span_str, - "actual_spans": "", - } - - if not transition["actions"]: - continue - - for traj_action in transition["actions"]: - result[f"{transition['name']}_cost"] += traj_action.get( - "completion_cost", 0 - ) - transition_result["cost"] += traj_action.get( - "completion_cost", 0 - ) - - if transition["name"] == "SearchCode": - search_iterations += 1 - - action = transition["actions"][-1] - - if "search_requests" in action["action"]: - for search_request in action["action"]["search_requests"]: - if search_request.get("query"): - result["p_query"] += 1 - - if search_request.get("file_pattern"): - result["p_file"] += 1 - - if search_request.get("code_snippet"): - result["p_code"] += 1 - - if search_request.get( - "class_name" - ) or search_request.get("class_names"): - result["p_class"] += 1 - - if search_request.get( - "function_name" - ) or search_request.get("function_names"): - result["p_function"] += 1 - - if "output" in action and action.get("output"): - output = action["output"] - - if "query" in output: - result["p_query"] += 1 - - if "file_pattern" in output: - result["p_file"] += 1 - - if "code_snippet" in output: - result["p_code"] += 1 - - if "class_name" in output or "class_names" in output: - result["p_class"] += 1 - - if "function_name" in output or "function_names" in output: - result["p_function"] += 1 - - if 
output.get("ranked_spans"): - for ranked_span in output["ranked_spans"]: - if ( - ranked_span["file_path"] - not in search_results_spans - ): - search_results_spans[ - ranked_span["file_path"] - ] = [] - search_results_spans[ - ranked_span["file_path"] - ].append(ranked_span["span_id"]) - - if not result["found_in_search"] and ( - found_in_expected_spans( - instance, search_results_spans - ) - or found_in_alternative_spans( - instance, search_results_spans - ) - ): - result["found_in_search"] = search_iterations - - if not result["file_in_search"]: - missing_files = get_missing_files( - instance["expected_spans"], - search_results_spans, - ) - if not missing_files: - result["file_in_search"] = search_iterations - - if transition["name"] == "IdentifyCode": - id_iterations += 1 - - action = transition["actions"][-1] - if action.get("action"): - identified_str = "" - if action["action"].get("identified_spans"): - for span in action["action"]["identified_spans"]: - identified_str += ( - f"{span['file_path']}: {span['span_ids']} " - ) - if span["file_path"] not in identified_spans: - identified_spans[span["file_path"]] = [] - - transition_result["actual_spans"] += ( - f"{span['file_path']}: {','.join(span['span_ids'])} " - ) - for span_id in span["span_ids"]: - identified_spans[span["file_path"]].append( - span_id - ) - result["identified_spans"] = identified_str - - if not result["file_identified"]: - missing_files = get_missing_files( - instance["expected_spans"], - identified_spans, - ) - if not missing_files: - result["file_identified"] = id_iterations - - if result[ - "expected_identified" - ] is None and found_in_expected_spans( - instance, identified_spans - ): - result["expected_identified"] = id_iterations - - if result[ - "alt_identified" - ] is None and found_in_alternative_spans( - instance, identified_spans - ): - result["alt_identified"] = id_iterations - - if result.get("alt_identified") or result.get( - "expected_identified" - ): - result["identified"] = min( - result.get("alt_identified") or 1000, - result.get("expected_identified") or 1000, - ) - - if transition["name"] == "PlanToCode": - action = transition["actions"][-1]["action"] - if action.get("action") == "review": - result["review"] = True - - if "file_path" in action: - if "span_id" not in action: - logger.warning( - f"Span id missing in planning action in {instance['instance_id']}" - ) - else: - file_path = action["file_path"] - if file_path not in planned_spans: - planned_spans[file_path] = [] - planned_spans[file_path].append(action["span_id"]) - transition_result["actual_spans"] = ( - f"{file_path}: {action['span_id']} " - ) - - if not result.get("planned") and ( - found_in_expected_spans( - instance, - planned_spans, - ) - or found_in_alternative_spans(instance, planned_spans) - ): - result["planned"] = True - - if transition["name"] == "EditCode": - result["edit_retries"] = len(transition["actions"]) - 1 - - action = transition["actions"][-1] - output = action.get("output", {}) - - if output: - edited = output.get("diff") - - if edited: - result["has_diff"] = True - - for lint in output.get("verification_errors", []): - lint_codes.add(lint["code"]) - - if edited and "file_path" in transition["state"]: - file_path = transition["state"]["file_path"] - if file_path not in edited_spans: - edited_spans[file_path] = [] - edited_spans[file_path].append( - transition["state"]["span_id"] - ) - transition_result["actual_spans"] = ( - f"{file_path}: {transition['state']['span_id']} " - ) - - if not result.get("edited") and 
( - found_in_expected_spans( - instance, - edited_spans, - ) - or found_in_alternative_spans(instance, edited_spans) - ): - result["edited"] = True - - transitions.append(transition_result) - - if result.get("alt_identified") or result.get("expected_identified"): - result["identified"] = min( - result.get("alt_identified") or 1000, - result.get("expected_identified") or 1000, - ) - - result["expected_files"] = list(instance["expected_spans"].keys()) - result["edited_files"] = list(edited_spans.keys()) - result["identified_spans"] = sum( - [len(v) for v in identified_spans.values()] - ) - - result["lints"] = ",".join(lint_codes) - - if report and info.get("instance_id", "") in report["resolved"]: - result["status"] = "resolved" - elif result["edited"]: - result["status"] = "edited" - elif result["identified"]: - result["status"] = "identified" - elif result["found_in_search"]: - result["status"] = "found_in_search" - elif result["file_identified"]: - result["status"] = "file_identified" - else: - result["status"] = "" - - if "error" in info: - result["error"] = info["error"].split("\n")[0] - else: - result["error"] = "" - - except Exception as e: - raise e - - return result, transitions - - -def generate_md_report(trajectory: dict, instance: dict): - info = trajectory["info"] - markdown = f"# {instance['instance_id']}\n" - - markdown += "\n## Problem statement\n" - markdown += f"```\n{instance['problem_statement']}\n```\n" - - if "error" in trajectory["info"]: - markdown += "\n## Error\n" - markdown += f"```\n{trajectory['info']['error']}\n```\n" - else: - markdown += "\n## Prediction\n" - markdown += f"```diff\n{info['submission']}\n```\n" - - markdown += "\n## Golden patch\n" - markdown += f"```diff\n{instance['golden_patch']}\n```\n" - - markdown += "\n## Trajectory\n" - - repo_dir = setup_swebench_repo(instance) - file_repo = FileRepository(repo_dir) - - for j, step in enumerate(trajectory["transitions"]): - for i, traj_action in enumerate(step["actions"]): - state_name = step['state'] - markdown += f"### {j+1} {state_name} ({i+1})\n\n" - - if not traj_action.get("action"): - continue - action = traj_action["action"] - - if state_name == "PlanToCode": - if action.get("scratch_pad"): - markdown += "*" + action["scratch_pad"] + "*" - - if action.get("instructions"): - markdown += f"\n\n * {action['instructions']}" - - if action.get("file_path"): - markdown += f"\n * {action['file_path']}" - - if action.get("span_id"): - markdown += f"\n * {action['span_id']}" - - if action.get("file_path") and action.get("span_id"): - markdown += "\n\n#### File context \n\n" - try: - file_context = FileContext(file_repo) - file_context.add_span_to_context( - action.get("file_path"), - action.get("span_id"), - ) - markdown += file_context.create_prompt( - show_outcommented_code=True - ) - except Exception as e: - logger.error(e) - - if state_name == "EditCode": - markdown += "#### LLM Response\n\n" - markdown += f"```\n{action.get('content', '')}\n```\n" - - output = traj_action.get("output") - if output: - if output.get("diff"): - markdown += "#### Diff\n\n" - markdown += f"```diff\n{output['diff']}\n```\n" - - if output.get("errors"): - markdown += "#### Errors\n\n" - markdown += f"{output['errors']}\n\n" - - if output.get("message"): - markdown += "#### Message\n\n" - markdown += f"{output['message']}\n\n" - - if state_name == "ClarifyCodeChange": - if action.get("thoughts"): - markdown += "*" + action["thoughts"] + "*" - - if action.get("output") and action.get("output").get("start_line"): - markdown 
+= f"\n* Start Line: {action['output']['start_line']}\n" - markdown += f"\n* End Line: {action['output']['end_line']}\n" - - if state_name == "Finished": - markdown += f"*{action['properties']['message']}*\n" - - if state_name == "Rejected": - markdown += f"*{action['properties']['message']}*\n" - - markdown += "## Alternative patches\n" - for alternative in instance["resolved_by"]: - markdown += f"### {alternative['name']}\n" - markdown += f"```diff\n{alternative['patch']}\n```\n" - - return markdown diff --git a/moatless/benchmark/report_v2.py b/moatless/benchmark/report_v2.py deleted file mode 100644 index b8430cce..00000000 --- a/moatless/benchmark/report_v2.py +++ /dev/null @@ -1,449 +0,0 @@ -import logging - -from moatless import FileRepository -from moatless.benchmark.swebench import found_in_expected_spans, found_in_alternative_spans, setup_swebench_repo -from moatless.benchmark.utils import get_missing_files -from moatless.edit.plan import ApplyChange -from moatless.file_context import FileContext -from moatless.find.search import SearchRequest - -logger = logging.getLogger(__name__) - -import logging - -from moatless import FileRepository -from moatless.benchmark.swebench import found_in_expected_spans, found_in_alternative_spans, setup_swebench_repo -from moatless.benchmark.utils import get_missing_files -from moatless.file_context import FileContext - -logger = logging.getLogger(__name__) - -import logging -from typing import Dict, List, Tuple, Optional - -from moatless import FileRepository -from moatless.benchmark.swebench import found_in_expected_spans, found_in_alternative_spans, setup_swebench_repo -from moatless.benchmark.utils import get_missing_files -from moatless.file_context import FileContext -from moatless.trajectory import Trajectory -from moatless.types import ActionTransaction, Usage, Content -from moatless.state import AgenticState - -logger = logging.getLogger(__name__) - - -def to_result(instance: Dict, trajectory: Trajectory, report: Optional[Dict] = None) -> Dict: - info = trajectory._info - - if report and "resolved_ids" in report and instance["instance_id"] in report["resolved_ids"]: - result_status = "resolved" - else: - result_status = info.get("status") - - resolved = result_status == "resolved" - - try: - result = { - "instance_id": instance["instance_id"], - "duration": info.get("duration", 0), - "total_cost": info.get("total_cost", 0), - "resolved_by": (len(instance.get("resolved_by", []))), - "status": None, - "result_status": result_status, - "transitions": len(trajectory.transitions), - "edited": False, - "planned": False, - "identified": None, - "expected_identified": None, - "alt_identified": None, - "found_in_search": None, - "file_identified": None, - "file_in_search": None, - "edit_retries": 0, - "has_diff": False, - "lint_codes": None, - "review": False, - "p_query": 0, - "p_file": 0, - "p_code": 0, - "p_class": 0, - "p_function": 0, - "lints": "", - } - - lint_codes = set() - search_results_spans: Dict[str, List[str]] = {} - identified_spans: Dict[str, List[str]] = {} - planned_spans: Dict[str, List[str]] = {} - edited_spans: Dict[str, List[str]] = {} - - id_iterations = 0 - search_iterations = 0 - - selected_transition_ids = [] - current_state = trajectory.get_current_state() - while current_state: - selected_transition_ids.append(current_state.id) - current_state = current_state.previous_state - - logger.info(f"Selected transitions: {selected_transition_ids}") - - if instance.get("expected_spans"): - for transition in 
trajectory.transitions: - if selected_transition_ids and transition.id not in selected_transition_ids: - continue - - state: AgenticState = transition.state - state_name = state.name - - if state_name not in result: - result[state_name] = 0 - result[f"{state_name}_cost"] = 0 - - result[state_name] += 1 - - expected_span_str = "" - for file_path, span_ids in instance["expected_spans"].items(): - expected_span_str += f"{file_path}: {span_ids} " - - if not state._actions: - continue - - for action in state._actions: - result[f"{state_name}_cost"] += action.usage.completion_cost if action.usage else 0 - - if state_name == "SearchCode": - search_iterations += 1 - - action = state._actions[-1] - - if isinstance(action.request, SearchRequest): - for search_request in action.request.search_requests: - if search_request.query: - result["p_query"] += 1 - if search_request.file_pattern: - result["p_file"] += 1 - if search_request.code_snippet: - result["p_code"] += 1 - if search_request.class_name or search_request.class_names: - result["p_class"] += 1 - if search_request.function_name or search_request.function_names: - result["p_function"] += 1 - - if state_name == "IdentifyCode": - id_iterations += 1 - - if state.ranked_spans: - for ranked_span in state.ranked_spans: - if ranked_span.file_path not in search_results_spans: - search_results_spans[ranked_span.file_path] = [] - search_results_spans[ranked_span.file_path].append(ranked_span.span_id) - - if not result["found_in_search"] and ( - found_in_expected_spans(instance, search_results_spans) - or found_in_alternative_spans(instance, search_results_spans) - ): - result["found_in_search"] = search_iterations - - if not result["file_in_search"]: - missing_files = get_missing_files( - instance["expected_spans"], - search_results_spans, - ) - if not missing_files: - result["file_in_search"] = search_iterations - - if state._actions: - action = state._actions[-1] - identified_str = "" - if action.request.identified_spans: - for span in action.request.identified_spans: - identified_str += f"{span.file_path}: {span.span_ids} " - if span.file_path not in identified_spans: - identified_spans[span.file_path] = [] - - for span_id in span.span_ids: - identified_spans[span.file_path].append(span_id) - result["identified_spans"] = identified_str - - if not result["file_identified"]: - missing_files = get_missing_files( - instance["expected_spans"], - identified_spans, - ) - if not missing_files: - result["file_identified"] = id_iterations - - if result["expected_identified"] is None and found_in_expected_spans(instance, identified_spans): - result["expected_identified"] = id_iterations - - if result["alt_identified"] is None and found_in_alternative_spans(instance, identified_spans): - result["alt_identified"] = id_iterations - - if result.get("alt_identified") or result.get("expected_identified"): - result["identified"] = min( - result.get("alt_identified") or 1000, - result.get("expected_identified") or 1000, - ) - - if state_name == "PlanToCode": - action = state._actions[-1] - - if action.request.action == "review": - result["review"] = True - - if action.request.file_path: - file_path = action.request.file_path - if file_path not in planned_spans: - planned_spans[file_path] = [] - planned_spans[file_path].append(action.request.span_id) - - if not result.get("planned") and ( - found_in_expected_spans(instance, planned_spans) - or found_in_alternative_spans(instance, planned_spans) - ): - result["planned"] = True - - if state_name == "EditCode": - 
result["edit_retries"] = len(state._actions) - 1 - - action = state._actions[-1] - edited = action.response and action.response.trigger == "finish" - - if edited and hasattr(state, 'file_path'): - file_path = state.file_path - if file_path not in edited_spans: - edited_spans[file_path] = [] - edited_spans[file_path].append(state.span_id) - - if not result.get("edited") and ( - found_in_expected_spans(instance, edited_spans) - or found_in_alternative_spans(instance, edited_spans) - ): - result["edited"] = True - - if action.response and action.response.output: - output = action.response.output - if edited: - result["has_diff"] = True - - for lint in output.get("verification_errors", []): - lint_codes.add(lint["code"]) - - if result.get("alt_identified") or result.get("expected_identified"): - result["identified"] = min( - result.get("alt_identified") or 1000, - result.get("expected_identified") or 1000, - ) - - result["expected_files"] = list(instance["expected_spans"].keys()) - result["edited_files"] = list(edited_spans.keys()) - result["identified_spans"] = sum(len(v) for v in identified_spans.values()) - - result["lints"] = ",".join(lint_codes) - - if result["edited"]: - result["status"] = "edited" - elif result["identified"]: - result["status"] = "identified" - elif result["found_in_search"]: - result["status"] = "found_in_search" - elif result["file_identified"]: - result["status"] = "file_identified" - else: - result["status"] = "" - - if "error" in info: - result["error"] = info["error"].split("\n")[0] - else: - result["error"] = "" - - except Exception as e: - raise e - - return result - -def generate_md_report(trajectory: Trajectory, instance: Dict) -> str: - info = trajectory._info - markdown = f"# {instance['instance_id']}\n" - - markdown += "\n## Problem statement\n" - markdown += f"```\n{instance['problem_statement']}\n```\n" - - if "error" in trajectory._info: - markdown += "\n## Error\n" - markdown += f"```\n{trajectory._info['error']}\n```\n" - else: - markdown += "\n## Prediction\n" - markdown += f"```diff\n{info['submission']}\n```\n" - - markdown += "\n## Golden patch\n" - markdown += f"```diff\n{instance['golden_patch']}\n```\n" - - markdown += "\n## Trajectory\n" - - repo_dir = setup_swebench_repo(instance) - file_repo = FileRepository(repo_dir) - - for j, transition in enumerate(trajectory.transitions): - state = transition.state - for i, action in enumerate(state._actions): - markdown += f"### {j+1} {state.name} ({i+1})\n\n" - - if state.name == "PlanToCode": - if action.request.file_path: - if action.request.instructions: - markdown += f"\n\n * {action.request.instructions}" - markdown += f"\n * {action.request.file_path}" - markdown += f"\n * {action.request.span_id}" - - markdown += "\n\n#### File context \n\n" - try: - file_context = FileContext(file_repo) - file_context.add_span_to_context( - action.request.file_path, - action.request.span_id, - ) - markdown += file_context.create_prompt( - show_outcommented_code=True - ) - except Exception as e: - logger.error(e) - - if state.name == "EditCode": - markdown += "#### LLM Response\n\n" - markdown += f"```\n{action.request.content if isinstance(action.request, Content) else ''}\n```\n" - - if action.response and action.response.output: - output = action.response.output - if output.get("diff"): - markdown += "#### Diff\n\n" - markdown += f"```diff\n{output['diff']}\n```\n" - - if output.get("errors"): - markdown += "#### Errors\n\n" - markdown += f"{output['errors']}\n\n" - - if output.get("message"): - markdown += 
"#### Message\n\n" - markdown += f"{output['message']}\n\n" - - if state.name == "ClarifyCodeChange": - - if action.request.scratch_pad: - markdown += f"*{action.request.scratch_pad}*" - - if action.response and action.response.output: - output = action.response.output - if output.get("start_line"): - markdown += f"\n* Start Line: {output['start_line']}\n" - markdown += f"\n* End Line: {output['end_line']}\n" - - if state.name == "Finished": - markdown += f"*{action.request.thoughts}*\n" - - if state.name == "Rejected": - markdown += f"*{action.request.thoughts}*\n" - - markdown += "## Alternative patches\n" - for alternative in instance["resolved_by"]: - markdown += f"### {alternative['name']}\n" - markdown += f"```diff\n{alternative['patch']}\n```\n" - - return markdown -def generate_md_report(trajectory: dict, instance: dict): - info = trajectory["info"] - markdown = f"# {instance['instance_id']}\n" - - markdown += "\n## Problem statement\n" - markdown += f"```\n{instance['problem_statement']}\n```\n" - - if "error" in trajectory["info"]: - markdown += "\n## Error\n" - markdown += f"```\n{trajectory['info']['error']}\n```\n" - else: - markdown += "\n## Prediction\n" - markdown += f"```diff\n{info['submission']}\n```\n" - - markdown += "\n## Golden patch\n" - markdown += f"```diff\n{instance['golden_patch']}\n```\n" - - markdown += "\n## Trajectory\n" - - repo_dir = setup_swebench_repo(instance) - file_repo = FileRepository(repo_dir) - - for j, step in enumerate(trajectory["transitions"]): - for i, traj_action in enumerate(step["actions"]): - state_name = step['state'] - markdown += f"### {j+1} {state_name} ({i+1})\n\n" - - if not traj_action.get("action"): - continue - action = traj_action["action"] - - if state_name == "PlanToCode": - if action.get("scratch_pad"): - markdown += "*" + action["scratch_pad"] + "*" - - if action.get("instructions"): - markdown += f"\n\n * {action['instructions']}" - - if action.get("file_path"): - markdown += f"\n * {action['file_path']}" - - if action.get("span_id"): - markdown += f"\n * {action['span_id']}" - - if action.get("file_path") and action.get("span_id"): - markdown += "\n\n#### File context \n\n" - try: - file_context = FileContext(file_repo) - file_context.add_span_to_context( - action.get("file_path"), - action.get("span_id"), - ) - markdown += file_context.create_prompt( - show_outcommented_code=True - ) - except Exception as e: - logger.error(e) - - if state_name == "EditCode": - markdown += "#### LLM Response\n\n" - markdown += f"```\n{action.get('content', '')}\n```\n" - - output = traj_action.get("output") - if output: - if output.get("diff"): - markdown += "#### Diff\n\n" - markdown += f"```diff\n{output['diff']}\n```\n" - - if output.get("errors"): - markdown += "#### Errors\n\n" - markdown += f"{output['errors']}\n\n" - - if output.get("message"): - markdown += "#### Message\n\n" - markdown += f"{output['message']}\n\n" - - if state_name == "ClarifyCodeChange": - if action.get("thoughts"): - markdown += "*" + action["thoughts"] + "*" - - if action.get("output") and action.get("output").get("start_line"): - markdown += f"\n* Start Line: {action['output']['start_line']}\n" - markdown += f"\n* End Line: {action['output']['end_line']}\n" - - if state_name == "Finished": - markdown += f"*{action['properties']['message']}*\n" - - if state_name == "Rejected": - markdown += f"*{action['properties']['message']}*\n" - - markdown += "## Alternative patches\n" - for alternative in instance["resolved_by"]: - markdown += f"### 
{alternative['name']}\n" - markdown += f"```diff\n{alternative['patch']}\n```\n" - - return markdown diff --git a/moatless/benchmark/swebench/utils.py b/moatless/benchmark/swebench/utils.py index 6090ad36..91e948d2 100644 --- a/moatless/benchmark/swebench/utils.py +++ b/moatless/benchmark/swebench/utils.py @@ -1,20 +1,20 @@ +import fcntl import logging import os from typing import Optional -from datasets import load_dataset - from moatless.benchmark.utils import ( - file_spans_to_dict, get_missing_files, get_missing_spans, ) -from moatless.file_context import FileContext from moatless.index import CodeIndex -from moatless.repository import FileRepository, GitRepository -from moatless.utils.repo import setup_github_repo -from moatless.workspace import Workspace - +from moatless.repository import GitRepository +from moatless.repository.repository import Repository +from moatless.utils.repo import ( + setup_github_repo, + get_repo_dir_name, + retry_clone, +) logger = logging.getLogger(__name__) @@ -22,6 +22,8 @@ def load_instances( dataset_name: str = "princeton-nlp/SWE-bench_Lite", split: str = "test" ): + from datasets import load_dataset + data = load_dataset(dataset_name, split=split) return {d["instance_id"]: d for d in data} @@ -40,6 +42,8 @@ def sorted_instances( split: str = "test", sort_by: str = "created_at", ): + from datasets import load_dataset + data = load_dataset(dataset_name, split=split) instances = list(data) instances = sorted(instances, key=lambda x: x[sort_by]) @@ -47,7 +51,7 @@ def sorted_instances( def get_repo_dir_name(repo: str): - return repo.replace("/", "_") + return repo.replace("/", "__") def found_in_expected_spans(instance: dict, spans: dict): @@ -56,6 +60,7 @@ def found_in_expected_spans(instance: dict, spans: dict): logging.warning( f"{instance['instance_id']} Expected spans for {file_path} is empty" ) + missing_spans = get_missing_spans(instance["expected_spans"], spans) return not missing_spans @@ -66,7 +71,7 @@ def found_in_alternative_spans(instance: dict, spans: dict): for alternative_spans in instance["alternative_spans"]: for file_path, span_ids in alternative_spans["spans"].items(): if not span_ids: - logging.warning( + logging.info( f"{instance['instance_id']} Alternative spans for {file_path} is empty" ) @@ -77,205 +82,21 @@ def found_in_alternative_spans(instance: dict, spans: dict): return False -def sync_file_context_with_search_trajectory(workspace: Workspace, trajectory: dict): - for transition in trajectory["transitions"]: - for action in transition["actions"]: - if action["action"].get("identified_spans"): - for span in action["action"]["identified_spans"]: - workspace.file_context.add_spans_to_context( - span["file_path"], span["span_ids"] - ) - - -def verify_search_trajectory( - trajectory: dict, instance: dict, workspace: Workspace -) -> dict: - result = { - "transitions": len(trajectory["transitions"]), - "identifieed": None, - "expected_identified": None, - "alt_identified": None, - "identified": None, - "file_identified": None, - "found_in_search": None, - "tokens": 0, - "expanded_imports": False, - "expanded_related": False, - "expanded_small_classes": False, - "expanded_tokens": 0, - } - - file_context = workspace.create_file_context() - search_file_context = workspace.create_file_context() - - iterations = 0 - for transition in trajectory["transitions"]: - if transition["name"] == "SearchCode": - iterations += 1 - - for action in transition["actions"]: - if ( - "output" in action - and action.get("output") - and 
action["output"].get("ranked_spans") - ): - for ranked_span in action["output"]["ranked_spans"]: - search_file_context.add_spans_to_context( - ranked_span["file_path"], [ranked_span["span_id"]] - ) - - if action["action"].get("identified_spans"): - for span in action["action"]["identified_spans"]: - file_context.add_spans_to_context( - span["file_path"], span["span_ids"] - ) - - if result["found_in_search"] is None and ( - found_in_expected_spans( - instance, - file_spans_to_dict(search_file_context.to_files_with_spans()), - ) - or found_in_alternative_spans( - instance, file_spans_to_dict(file_context.to_files_with_spans()) - ) - ): - result["found_in_search"] = iterations - - if result["file_identified"] is None: - missing_files = get_missing_files( - instance["expected_spans"], - file_spans_to_dict(file_context.to_files_with_spans()), +def found_in_alternative_files(instance: dict, files: list): + if "alternative_spans" not in instance: + return False + for alternative_spans in instance["alternative_spans"]: + for file_path, span_ids in alternative_spans["spans"].items(): + if not span_ids: + logging.info( + f"{instance['instance_id']} Alternative spans for {file_path} is empty" ) - if not missing_files: - result["file_identified"] = iterations - - if result["expected_identified"] is None and found_in_expected_spans( - instance, file_spans_to_dict(file_context.to_files_with_spans()) - ): - result["expected_identified"] = iterations - - if result["alt_identified"] is None and found_in_alternative_spans( - instance, file_spans_to_dict(file_context.to_files_with_spans()) - ): - result["alt_identified"] = iterations - - if result["expected_identified"] is not None: - result["identified"] = result["expected_identified"] - - if result["alt_identified"] is not None and ( - result["identified"] is None or result["alt_identified"] < result["identified"] - ): - result["identified"] = result["alt_identified"] - - result["tokens"] = file_context.context_size() - - file_context.expand_context_with_init_spans() - actual_span_dicts = file_spans_to_dict(file_context.to_files_with_spans()) - - if found_in_expected_spans( - instance, actual_span_dicts - ) or found_in_alternative_spans(instance, actual_span_dicts): - result["expanded_imports"] = True - - file_context.expand_context_with_related_spans(max_tokens=8000) - if found_in_expected_spans( - instance, file_spans_to_dict(file_context.to_files_with_spans()) - ) or found_in_alternative_spans( - instance, file_spans_to_dict(file_context.to_files_with_spans()) - ): - result["expanded_related"] = True - - file_context.expand_small_classes(max_tokens=500) - if found_in_expected_spans( - instance, file_spans_to_dict(file_context.to_files_with_spans()) - ) or found_in_alternative_spans( - instance, file_spans_to_dict(file_context.to_files_with_spans()) - ): - result["expanded_small_classes"] = True - - result["expanded_tokens"] = file_context.context_size() - - result["iterations"] = iterations - return result - - -def generate_md_report(trajectory: dict, instance: dict): - info = trajectory["info"] - markdown = f"# {info['instance_id']}\n" - - markdown += "\n## Problem statement\n" - markdown += f"```\n{instance['problem_statement']}\n```\n" - - if "error" in trajectory["info"]: - markdown += "\n## Error\n" - markdown += f"```\n{trajectory['info']['error']}\n```\n" - else: - markdown += "\n## Prediction\n" - markdown += f"```diff\n{info['submission']}\n```\n" - - markdown += "\n## Golden patch\n" - markdown += f"```diff\n{instance['golden_patch']}\n```\n" 
- - markdown += "\n## Trajectory\n" - - repo_dir = setup_swebench_repo(instance) - file_repo = FileRepository(repo_dir) - for step in trajectory["transitions"]: - for i, action in enumerate(step["actions"]): - markdown += f"### {step['name']} ({i})\n\n" - - if step["name"] == "PlanToCode": - if action.get("action").get("thoughts"): - markdown += "*" + action["action"]["thoughts"] + "*" - - if action.get("action", {}).get("action", {}).get("description"): - markdown += f"\n\n * {action['action']['action']['description']}" - - if action.get("action", {}).get("action", {}).get("file_path"): - markdown += f"\n * {action['action']['action']['file_path']}" - - if action.get("action", {}).get("action", {}).get("span_id"): - markdown += f"\n * {action['action']['action']['span_id']}" - - markdown += "\n\n#### File context \n\n" - - file_context = FileContext(file_repo) - file_context.add_span_to_context( - action["action"]["action"]["file_path"], - action["action"]["action"]["span_id"], - ) - - markdown += file_context.create_prompt(show_outcommented_code=True) - - if step["name"] == "EditCode": - markdown += "#### LLM Response\n\n" - markdown += f"```\n{action['action']['content']}\n```\n" - - if action.get("output", {}).get("message"): - markdown += "#### Output\n\n" - markdown += f"{action['output']['message']}\n\n" - - if step["name"] == "ClarifyCodeChange": - if action.get("thoughts"): - markdown += "*" + action["thoughts"] + "*" - - if action.get("output", {}).get("start_line"): - markdown += f"\n* Start Line: {action['output']['start_line']}\n" - markdown += f"\n* End Line: {action['output']['end_line']}\n" - - if step["name"] == "Finished": - markdown += f"*{action['properties']['message']}*\n" - - if step["name"] == "Rejected": - markdown += f"*{action['properties']['message']}*\n" - - markdown += "## Alternative patches\n" - for alternative in instance["resolved_by"]: - markdown += f"### {alternative['name']}\n" - markdown += f"```diff\n{alternative['patch']}\n```\n" + missing_spans = get_missing_files(alternative_spans["spans"], files) + if not missing_spans: + return True - return markdown + return False def setup_swebench_repo( @@ -301,11 +122,10 @@ def setup_swebench_repo( ) -def create_workspace( +def create_repository( instance: Optional[dict] = None, instance_id: Optional[str] = None, repo_base_dir: Optional[str] = None, - index_store_dir: Optional[str] = None, ): """ Create a workspace for the given SWE-bench instance. 
@@ -314,24 +134,67 @@ def create_workspace( if not instance: instance = load_instance(instance_id) - if not index_store_dir: - index_store_dir = os.getenv("INDEX_STORE_DIR", "/tmp/index_store") - if not repo_base_dir: repo_base_dir = os.getenv("REPO_DIR", "/tmp/repos") - repo_dir_name = instance["repo"].replace("/", "__") - repo_url = f"https://github.com/swe-bench/{repo_dir_name}.git" - repo_dir = f"{repo_base_dir}/swe-bench_{repo_dir_name}" - repo = GitRepository.from_repo( - git_repo_url=repo_url, repo_path=repo_dir, commit=instance["base_commit"] + # Ensure the directory exists + os.makedirs(os.path.dirname(repo_base_dir), exist_ok=True) + + # Ensure the base directory exists + os.makedirs(repo_base_dir, exist_ok=True) + + repo_dir_name = get_repo_dir_name(instance["repo"]) + local_repo_path = f"{repo_base_dir}/swe-bench_{repo_dir_name}" + lock_file_path = f"{local_repo_path}.lock" + + # Ensure the directory for the lock file exists + os.makedirs(os.path.dirname(lock_file_path), exist_ok=True) + + repo_path = f"{repo_base_dir}/swe-bench_{instance['instance_id']}" + if os.path.exists(repo_path): + try: + logger.info(f"Initializing GitRepository from existing repo {repo_path}") + return GitRepository(repo_path=repo_path) + except Exception as e: + logging.warning(f"Error initializing GitRepository: {e}") + + with open(lock_file_path, "w") as lock_file: + logging.debug(f"Acquiring lock for {local_repo_path}") + fcntl.flock(lock_file, fcntl.LOCK_EX) + if not os.path.exists(local_repo_path): + # Clone from GitHub if local repo doesn't exist + github_url = f"https://github.com/swe-bench/{repo_dir_name}.git" + try: + retry_clone(github_url, local_repo_path) + logging.info(f"Cloned {github_url} to {local_repo_path}") + except Exception as e: + logger.error(f"Failed to clone after multiple attempts: {e}") + raise + logging.debug(f"Releasing lock for {local_repo_path}") + fcntl.flock(lock_file, fcntl.LOCK_UN) + + repo_url = f"file://{local_repo_path}" + + return GitRepository.from_repo( + git_repo_url=repo_url, repo_path=repo_path, commit=instance["base_commit"] ) - code_index = CodeIndex.from_index_name( - instance["instance_id"], index_store_dir=index_store_dir, file_repo=repo - ) - return Workspace( - file_repo=repo, - code_index=code_index, +def create_index( + instance: dict, + repository: Repository | None = None, + index_store_dir: Optional[str] = None, +): + """ + Create a code index for the given SWE-bench instance. + """ + if not index_store_dir: + index_store_dir = os.getenv("INDEX_STORE_DIR", "/tmp/index_store") + + if not repository: + repository = create_repository(instance) + + code_index = CodeIndex.from_index_name( + instance["instance_id"], index_store_dir=index_store_dir, file_repo=repository ) + return code_index diff --git a/moatless/benchmark/swebench_lite_all_evaluations.json b/moatless/benchmark/swebench_lite_all_evaluations.json index 28b4dde5..81d2ed96 100644 --- a/moatless/benchmark/swebench_lite_all_evaluations.json +++ b/moatless/benchmark/swebench_lite_all_evaluations.json @@ -5,8038 +5,59776 @@ "base_commit": "a0df2cbb10419037d11d04352b3175405ab52941", "problem_statement": "Allow lists in the dict values of the hooks argument\nCurrently the Request class has a .register_hook() method but it parses the dictionary it expects from it's hooks argument weirdly: the argument can only specify one hook function per hook.
If you pass in a list of hook functions per hook the code in Request.**init**() will wrap the list in a list which then fails when the hooks are consumed (since a list is not callable). This is especially annoying since you can not use multiple hooks from a session. The only way to get multiple hooks now is to create the request object without sending it, then call .register_hook() multiple times and then finally call .send().\n\nThis would all be much easier if Request.**init**() parsed the hooks parameter in a way that it accepts lists as it's values.\n\n", "golden_patch": "diff --git a/requests/models.py b/requests/models.py\n--- a/requests/models.py\n+++ b/requests/models.py\n@@ -462,8 +462,10 @@ def path_url(self):\n \n def register_hook(self, event, hook):\n \"\"\"Properly register a hook.\"\"\"\n-\n- self.hooks[event].append(hook)\n+ if isinstance(hook, (list, tuple, set)):\n+ self.hooks[event].extend(hook)\n+ else:\n+ self.hooks[event].append(hook)\n \n def deregister_hook(self, event, hook):\n \"\"\"Deregister a previously registered hook.\n", + "test_patch": "diff --git a/tests/test_requests.py b/tests/test_requests.py\n--- a/tests/test_requests.py\n+++ b/tests/test_requests.py\n@@ -744,6 +744,40 @@ def add_bar_header(args):\n assert 'foo' in response.text\n assert 'bar' in response.text\n \n+ def test_allow_list_of_hooks_to_register_hook(self):\n+ \"\"\"Issue 785: https://github.com/kennethreitz/requests/issues/785\"\"\"\n+ def add_foo_header(args):\n+ if not args.get('headers'):\n+ args['headers'] = {}\n+\n+ args['headers'].update({\n+ 'X-Foo': 'foo'\n+ })\n+\n+ return args\n+\n+ def add_bar_header(args):\n+ if not args.get('headers'):\n+ args['headers'] = {}\n+\n+ args['headers'].update({\n+ 'X-Bar': 'bar'\n+ })\n+\n+ return args\n+\n+ def assert_hooks_are_callable(hooks):\n+ for h in hooks['args']:\n+ assert callable(h) is True\n+\n+ hooks = [add_foo_header, add_bar_header]\n+ r = requests.models.Request()\n+ r.register_hook('args', hooks)\n+ assert_hooks_are_callable(r.hooks)\n+\n+ r = requests.models.Request(hooks={'args': hooks})\n+ assert_hooks_are_callable(r.hooks)\n+\n def test_session_persistent_cookies(self):\n \n s = requests.session()\n", + "fail_to_pass": "[\"tests/test_requests.py::RequestsTestSuite::test_POSTBIN_GET_POST_FILES_WITH_HEADERS\", \"tests/test_requests.py::RequestsTestSuite::test_nonurlencoded_postdata\", \"tests/test_requests.py::RequestsTestSuite::test_prefetch_redirect_bug\", \"tests/test_requests.py::RequestsTestSuite::test_urlencoded_post_data\"]", + "pass_to_pass": "[\"tests/test_requests.py::RequestsTestSuite::test_BASICAUTH_HTTP_200_OK_GET\", \"tests/test_requests.py::RequestsTestSuite::test_BASICAUTH_TUPLE_HTTP_200_OK_GET\", \"tests/test_requests.py::RequestsTestSuite::test_GET_no_redirect\", \"tests/test_requests.py::RequestsTestSuite::test_HEAD_no_redirect\", \"tests/test_requests.py::RequestsTestSuite::test_HTTP_200_OK_GET\", \"tests/test_requests.py::RequestsTestSuite::test_HTTP_200_OK_GET_WITH_MIXED_PARAMS\", \"tests/test_requests.py::RequestsTestSuite::test_HTTP_200_OK_GET_WITH_PARAMS\", \"tests/test_requests.py::RequestsTestSuite::test_HTTP_200_OK_HEAD\", \"tests/test_requests.py::RequestsTestSuite::test_HTTP_200_OK_PUT\", \"tests/test_requests.py::RequestsTestSuite::test_HTTP_302_ALLOW_REDIRECT_GET\", \"tests/test_requests.py::RequestsTestSuite::test_HTTP_302_GET\", \"tests/test_requests.py::RequestsTestSuite::test_POSTBIN_GET_POST_FILES\", \"tests/test_requests.py::RequestsTestSuite::test_POSTBIN_GET_POST_FILES_WITH_PARAMS\", 
\"tests/test_requests.py::RequestsTestSuite::test_accept_objects_with_string_representations_as_urls\", \"tests/test_requests.py::RequestsTestSuite::test_bytes_files\", \"tests/test_requests.py::RequestsTestSuite::test_cached_response\", \"tests/test_requests.py::RequestsTestSuite::test_can_have_none_in_header_values\", \"tests/test_requests.py::RequestsTestSuite::test_connection_error\", \"tests/test_requests.py::RequestsTestSuite::test_connection_error_with_safe_mode\", \"tests/test_requests.py::RequestsTestSuite::test_connection_keepalive_and_close\", \"tests/test_requests.py::RequestsTestSuite::test_danger_mode_redirects\", \"tests/test_requests.py::RequestsTestSuite::test_decompress_gzip\", \"tests/test_requests.py::RequestsTestSuite::test_default_status_raising\", \"tests/test_requests.py::RequestsTestSuite::test_empty_response\", \"tests/test_requests.py::RequestsTestSuite::test_entry_points\", \"tests/test_requests.py::RequestsTestSuite::test_file_post_data\", \"tests/test_requests.py::RequestsTestSuite::test_head_content\", \"tests/test_requests.py::RequestsTestSuite::test_httpauth_recursion\", \"tests/test_requests.py::RequestsTestSuite::test_invalid_url\", \"tests/test_requests.py::RequestsTestSuite::test_invalid_urls_throw_requests_exception\", \"tests/test_requests.py::RequestsTestSuite::test_iter_lines\", \"tests/test_requests.py::RequestsTestSuite::test_max_redirects\", \"tests/test_requests.py::RequestsTestSuite::test_multiple_hooks\", \"tests/test_requests.py::RequestsTestSuite::test_nonurlencoded_post_data\", \"tests/test_requests.py::RequestsTestSuite::test_nonzero_evaluation\", \"tests/test_requests.py::RequestsTestSuite::test_params_accepts_kv_list\", \"tests/test_requests.py::RequestsTestSuite::test_params_are_added_before_fragment\", \"tests/test_requests.py::RequestsTestSuite::test_path_is_not_double_encoded\", \"tests/test_requests.py::RequestsTestSuite::test_post_fields_with_multiple_values_and_files\", \"tests/test_requests.py::RequestsTestSuite::test_post_fields_with_multiple_values_and_files_as_tuples\", \"tests/test_requests.py::RequestsTestSuite::test_prefetch_return_response_interaction\", \"tests/test_requests.py::RequestsTestSuite::test_redirect_history\", \"tests/test_requests.py::RequestsTestSuite::test_relative_redirect_history\", \"tests/test_requests.py::RequestsTestSuite::test_request_ok_set\", \"tests/test_requests.py::RequestsTestSuite::test_response_has_unicode_url\", \"tests/test_requests.py::RequestsTestSuite::test_response_sent\", \"tests/test_requests.py::RequestsTestSuite::test_session_HTTP_200_OK_GET\", \"tests/test_requests.py::RequestsTestSuite::test_session_persistent_headers\", \"tests/test_requests.py::RequestsTestSuite::test_session_pickling\", \"tests/test_requests.py::RequestsTestSuite::test_status_raising\", \"tests/test_requests.py::RequestsTestSuite::test_unicode_get\", \"tests/test_requests.py::RequestsTestSuite::test_upload_binary_data\", \"tests/test_requests.py::RequestsTestSuite::test_urlencoded_get_query_multivalued_param\", \"tests/test_requests.py::RequestsTestSuite::test_urlencoded_post_query_and_data\", \"tests/test_requests.py::RequestsTestSuite::test_urlencoded_post_query_multivalued_and_data\", \"tests/test_requests.py::RequestsTestSuite::test_urlencoded_post_querystring\", \"tests/test_requests.py::RequestsTestSuite::test_urlencoded_post_querystring_multivalued\", \"tests/test_requests.py::RequestsTestSuite::test_useful_exception_for_invalid_port\", 
\"tests/test_requests.py::RequestsTestSuite::test_useful_exception_for_invalid_scheme\", \"tests/test_requests.py::RequestsTestSuite::test_user_agent_transfers\"]", "expected_spans": { "requests/models.py": [ "Request.register_hook" ] }, + "test_file_spans": { + "tests/test_requests.py": [ + "RequestsTestSuite.test_session_persistent_cookies" + ] + }, "resolved_by": [ - { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/requests/models.py\n+++ b/requests/models.py\n@@ -146,13 +146,17 @@\n for event in HOOKS:\n self.hooks[event] = []\n \n hooks = hooks or {}\n \n for (k, v) in list(hooks.items()):\n- self.register_hook(event=k, hook=v)\n+ if isinstance(v, list):\n+ for hook in v:\n+ self.register_hook(event=k, hook=hook)\n+ else:\n+ self.register_hook(event=k, hook=v)\n \n #: Session.\n self.session = session\n \n #: SSL Verification.\n self.verify = verify\n" - }, { "name": "20240523_aider", - "patch": "diff --git a/requests/models.py b/requests/models.py\nindex d8456375..34bb9dc5 100644\n--- a/requests/models.py\n+++ b/requests/models.py\n@@ -148,8 +148,12 @@ class Request(object):\n \n hooks = hooks or {}\n \n- for (k, v) in list(hooks.items()):\n- self.register_hook(event=k, hook=v)\n+ for event, hook in hooks.items():\n+ if isinstance(hook, list):\n+ for h in hook:\n+ self.register_hook(event=event, hook=h)\n+ else:\n+ self.register_hook(event=event, hook=hook)\n \n #: Session.\n self.session = session\n@@ -463,7 +467,10 @@ class Request(object):\n def register_hook(self, event, hook):\n \"\"\"Properly register a hook.\"\"\"\n \n- self.hooks[event].append(hook)\n+ if isinstance(hook, list):\n+ self.hooks[event].extend(hook)\n+ else:\n+ self.hooks[event].append(hook)\n \n def deregister_hook(self, event, hook):\n \"\"\"Deregister a previously registered hook.\n" + "updated_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + } }, { "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/requests/models.py\n+++ b/requests/models.py\n@@ -149,7 +149,11 @@\n hooks = hooks or {}\n \n for (k, v) in list(hooks.items()):\n- self.register_hook(event=k, hook=v)\n+ if isinstance(v, list):\n+ for hook_func in v:\n+ self.register_hook(event=k, hook=hook_func)\n+ else:\n+ self.register_hook(event=k, hook=v)\n \n #: Session.\n self.session = session\n" + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/requests/models.py b/requests/models.py\nindex d8456375..16894ace 100644\n--- a/requests/models.py\n+++ b/requests/models.py\n@@ -462,8 +462,11 @@ class Request(object):\n \n def register_hook(self, event, hook):\n \"\"\"Properly register a hook.\"\"\"\n-\n- self.hooks[event].append(hook)\n+ \n+ if isinstance(hook, list):\n+ self.hooks[event].extend(hook)\n+ else:\n+ self.hooks[event].append(hook)\n \n def deregister_hook(self, event, hook):\n \"\"\"Deregister a previously registered hook.\n" + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__" + ] + } }, { "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/requests/models.py b/requests/models.py\nindex d8456375..aa98e367 100644\n--- 
a/requests/models.py\n+++ b/requests/models.py\n@@ -149,7 +149,11 @@ class Request(object):\n hooks = hooks or {}\n \n for (k, v) in list(hooks.items()):\n- self.register_hook(event=k, hook=v)\n+ if isinstance(v, list):\n+ for hook in v:\n+ self.register_hook(event=k, hook=hook)\n+ else:\n+ self.register_hook(event=k, hook=v)\n \n #: Session.\n self.session = session\n" + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/requests/models.py b/requests/models.py\nindex d8456375..aa98e367 100644\n--- a/requests/models.py\n+++ b/requests/models.py\n@@ -149,7 +149,11 @@ class Request(object):\n hooks = hooks or {}\n \n for (k, v) in list(hooks.items()):\n- self.register_hook(event=k, hook=v)\n+ if isinstance(v, list):\n+ for hook in v:\n+ self.register_hook(event=k, hook=hook)\n+ else:\n+ self.register_hook(event=k, hook=v)\n \n #: Session.\n self.session = session\n" - } - ], - "alternative_spans": [ + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, { - "run_name": "20240509_amazon-q-developer-agent-20240430-dev", - "spans": { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + }, + "alternative_spans": { "requests/models.py": [ - "Request" + "Request.__init__", + "Request.register_hook" ] } }, { - "run_name": "20240524_opencsg_starship_gpt4", - "spans": { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { "requests/models.py": [ - "Request" + "Request.__init__" ] } }, { - "run_name": "20240530_autocoderover-v20240408", - "spans": { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { "requests/models.py": [ - "Request" + "Request.__init__" ] } }, { - "run_name": "20240609_moatless_gpt4o", - "spans": { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "requests/models.py": [ + "docstring", + "imports", + "Request.__init__", + "Request.__repr__", + "Request._build_response", + "Request._encode_params", + "Request._encode_files", + "Request.full_url", + "Request.path_url", + "Request.register_hook", + "Request.deregister_hook", + "Request.send", + "Response.__init__", + "Response.__repr__", + "Response.iter_content", + "Response.iter_lines", + "Response.content", + "Response.text", + "Response.links", + "Response.raise_for_status" + ] + }, + "alternative_spans": { "requests/models.py": [ - "Request" + "docstring", + "imports", + "Request.__init__", + "Request.__repr__", + "Request._build_response", + "Request._encode_params", + "Request._encode_files", + "Request.full_url", + "Request.path_url", + "Request.register_hook", + "Request.deregister_hook", + "Request.send", + "Response.__init__", + "Response.__repr__", + "Response.iter_content", + "Response.iter_lines", + "Response.content", + "Response.text", + "Response.links", + "Response.raise_for_status" ] } - } - ] - }, - { - "instance_id": "psf__requests-1963", - "repo": "psf/requests", - "base_commit": "110048f9837f8441ea536804115e80b69f400277", - "problem_statement": "`Session.resolve_redirects` copies the original request for all subsequent requests, can cause incorrect method selection\nConsider the following redirection chain:\n\n```\nPOST /do_something 
HTTP/1.1\nHost: server.example.com\n...\n\nHTTP/1.1 303 See Other\nLocation: /new_thing_1513\n\nGET /new_thing_1513\nHost: server.example.com\n...\n\nHTTP/1.1 307 Temporary Redirect\nLocation: //failover.example.com/new_thing_1513\n```\n\nThe intermediate 303 See Other has caused the POST to be converted to\na GET. The subsequent 307 should preserve the GET. However, because\n`Session.resolve_redirects` starts each iteration by copying the _original_\nrequest object, Requests will issue a POST!\n\n", - "golden_patch": "diff --git a/requests/sessions.py b/requests/sessions.py\n--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -168,8 +168,11 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None,\n if new_auth is not None:\n prepared_request.prepare_auth(new_auth)\n \n+ # Override the original request.\n+ req = prepared_request\n+\n resp = self.send(\n- prepared_request,\n+ req,\n stream=stream,\n timeout=timeout,\n verify=verify,\n", - "expected_spans": { - "requests/sessions.py": [ - "SessionRedirectMixin.resolve_redirects" - ] - }, - "resolved_by": [ - { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/requests/sessions.py b/requests/sessions.py\nindex 425db22c..0d815209 100644\n--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -86,6 +86,7 @@ class SessionRedirectMixin(object):\n \"\"\"Receives a Response. Returns a generator of Responses.\"\"\"\n \n i = 0\n+ method = req.method\n \n while resp.is_redirect:\n prepared_request = req.copy()\n@@ -99,7 +100,6 @@ class SessionRedirectMixin(object):\n resp.close()\n \n url = resp.headers['location']\n- method = req.method\n \n # Handle redirection without scheme (see: RFC 1808 Section 4)\n if url.startswith('//'):\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "psf__requests-2148", - "repo": "psf/requests", - "base_commit": "fe693c492242ae532211e0c173324f09ca8cf227", - "problem_statement": "socket.error exception not caught/wrapped in a requests exception (ConnectionError perhaps?)\nI just noticed a case where I had a socket reset on me, and was raised to me as a raw socket error as opposed to something like a requests.exceptions.ConnectionError:\n\n```\n File \"/home/rtdean/***/***/***/***/***/***.py\", line 67, in dir_parse\n root = ElementTree.fromstring(response.text)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/models.py\", line 721, in text\n if not self.content:\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/models.py\", line 694, in content\n self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/models.py\", line 627, in generate\n for chunk in self.raw.stream(chunk_size, decode_content=True):\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/packages/urllib3/response.py\", line 240, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/packages/urllib3/response.py\", line 187, in read\n data = self._fp.read(amt)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/httplib.py\", line 543, in read\n return self._read_chunked(amt)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/httplib.py\", line 612, in _read_chunked\n 
value.append(self._safe_read(chunk_left))\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/httplib.py\", line 658, in _safe_read\n chunk = self.fp.read(min(amt, MAXAMOUNT))\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/socket.py\", line 380, in read\n data = self._sock.recv(left)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/gevent-1.0.1-py2.7-linux-x86_64.egg/gevent/socket.py\", line 385, in recv\n return sock.recv(*args)\nsocket.error: [Errno 104] Connection reset by peer\n```\n\nNot sure if this is by accident or design... in general, I guess I'd expect a requests exception when using requests, but I can start looking for socket errors and the like as well.\n\n", - "golden_patch": "diff --git a/requests/models.py b/requests/models.py\n--- a/requests/models.py\n+++ b/requests/models.py\n@@ -9,6 +9,7 @@\n \n import collections\n import datetime\n+import socket\n \n from io import BytesIO, UnsupportedOperation\n from .hooks import default_hooks\n@@ -22,7 +23,7 @@\n from .packages.urllib3.exceptions import DecodeError\n from .exceptions import (\n HTTPError, RequestException, MissingSchema, InvalidURL,\n- ChunkedEncodingError, ContentDecodingError)\n+ ChunkedEncodingError, ContentDecodingError, ConnectionError)\n from .utils import (\n guess_filename, get_auth_from_url, requote_uri,\n stream_decode_response_unicode, to_key_val_list, parse_header_links,\n@@ -640,6 +641,8 @@ def generate():\n raise ChunkedEncodingError(e)\n except DecodeError as e:\n raise ContentDecodingError(e)\n+ except socket.error as e:\n+ raise ConnectionError(e)\n except AttributeError:\n # Standard file-like object.\n while True:\n", - "expected_spans": { - "requests/models.py": [ - "imports", - "Response.iter_content" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "psf__requests-2317", - "repo": "psf/requests", - "base_commit": "091991be0da19de9108dbe5e3752917fea3d7fdc", - "problem_statement": "method = builtin_str(method) problem\nIn requests/sessions.py is a command:\n\nmethod = builtin_str(method)\nConverts method from\nb\u2019GET\u2019\nto\n\"b'GET\u2019\"\n\nWhich is the literal string, no longer a binary string. When requests tries to use the method \"b'GET\u2019\u201d, it gets a 404 Not Found response.\n\nI am using python3.4 and python-neutronclient (2.3.9) with requests (2.4.3). neutronclient is broken because it uses this \"args = utils.safe_encode_list(args)\" command which converts all the values to binary string, including method.\n\nI'm not sure if this is a bug with neutronclient or a bug with requests, but I'm starting here. Seems if requests handled the method value being a binary string, we wouldn't have any problem.\n\nAlso, I tried in python2.6 and this bug doesn't exist there. 
Some difference between 2.6 and 3.4 makes this not work right.\n\n", - "golden_patch": "diff --git a/requests/sessions.py b/requests/sessions.py\n--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -13,7 +13,7 @@\n from datetime import datetime\n \n from .auth import _basic_auth_str\n-from .compat import cookielib, OrderedDict, urljoin, urlparse, builtin_str\n+from .compat import cookielib, OrderedDict, urljoin, urlparse\n from .cookies import (\n cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)\n from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT\n@@ -425,7 +425,7 @@ def request(self, method, url,\n If Tuple, ('cert', 'key') pair.\n \"\"\"\n \n- method = builtin_str(method)\n+ method = to_native_string(method)\n \n # Create the Request.\n req = Request(\n", - "expected_spans": { - "requests/sessions.py": [ - "imports", - "Session.request" - ] - }, - "resolved_by": [ + }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/requests/sessions.py b/requests/sessions.py\nindex d701ff2e..4829747d 100644\n--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -425,6 +425,9 @@ class Session(SessionRedirectMixin):\n If Tuple, ('cert', 'key') pair.\n \"\"\"\n \n+ # Ensure method is a string, decode if it's a binary string\n+ if isinstance(method, bytes):\n+ method = method.decode('ascii')\n method = builtin_str(method)\n \n # Create the Request.\n" + "name": "20240820_honeycomb", + "updated_spans": { + "requests/models.py": [ + "docstring", + "Request.__init__", + "Request.register_hook" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "docstring", + "Request.__init__", + "Request.register_hook" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/requests/sessions.py b/requests/sessions.py\nindex d701ff2e..e9429a8e 100644\n--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -425,6 +425,14 @@ class Session(SessionRedirectMixin):\n If Tuple, ('cert', 'key') pair.\n \"\"\"\n \n+ if isinstance(method, bytes):\n+ method = method.decode('utf-8')\n+ if isinstance(method, bytes):\n+ method = method.decode('utf-8')\n+ if isinstance(method, bytes):\n+ method = method.decode('utf-8')\n+ if isinstance(method, bytes):\n+ method = method.decode('utf-8')\n method = builtin_str(method)\n \n # Create the Request.\n" + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + } }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -425,6 +425,8 @@\n If Tuple, ('cert', 'key') pair.\n \"\"\"\n \n+ if isinstance(method, bytes):\n+ method = method.decode('utf-8')\n method = builtin_str(method)\n \n # Create the Request.\n" + "name": "20240908_infant_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + } }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/requests/sessions.py b/requests/sessions.py\nindex d701ff2e..334c96e0 100644\n--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -425,7 +425,10 @@ class Session(SessionRedirectMixin):\n If Tuple, ('cert', 'key') pair.\n \"\"\"\n \n- method = builtin_str(method)\n+ if isinstance(method, bytes):\n+ method = 
method.decode('utf-8')\n+ else:\n+ method = str(method)\n \n # Create the Request.\n req = Request(\n" + "name": "20240829_Isoform", + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/jedi_info_test_requests.txt b/jedi_info_test_requests.txt\nnew file mode 100644\nindex 00000000..6dafa040\n--- /dev/null\n+++ b/jedi_info_test_requests.txt\n@@ -0,0 +1,64 @@\n+Could not find information for test_requests.py\n+Trying test_requests\n+********************************************************************************\n+Information about test_requests\n+Defined names for test_requests are: \n+\t__future__.division\n+\tjson\n+\tos\n+\tpickle\n+\tunittest\n+\tcollections\n+\tio\n+\trequests\n+\tpytest\n+\trequests.adapters.HTTPAdapter\n+\trequests.auth.HTTPDigestAuth\n+\trequests.auth._basic_auth_str\n+\trequests.compat.Morsel\n+\trequests.compat.cookielib\n+\trequests.compat.getproxies\n+\trequests.compat.str\n+\trequests.compat.urljoin\n+\trequests.compat.urlparse\n+\trequests.compat.is_py3\n+\trequests.compat.builtin_str\n+\trequests.cookies.cookiejar_from_dict\n+\trequests.cookies.morsel_to_cookie\n+\trequests.exceptions.ConnectionError\n+\trequests.exceptions.ConnectTimeout\n+\trequests.exceptions.InvalidSchema\n+\trequests.exceptions.InvalidURL\n+\trequests.exceptions.MissingSchema\n+\trequests.exceptions.ReadTimeout\n+\trequests.exceptions.Timeout\n+\trequests.models.PreparedRequest\n+\trequests.structures.CaseInsensitiveDict\n+\trequests.sessions.SessionRedirectMixin\n+\trequests.models.urlencode\n+\trequests.hooks.default_hooks\n+\tStringIO\n+\tio\n+\ttest_requests.u\n+\ttest_requests.u\n+\ttest_requests.TARPIT\n+\ttest_requests.HTTPBIN\n+\ttest_requests.httpbin\n+\ttest_requests.RequestsTestCase\n+\ttest_requests.TestContentEncodingDetection\n+\ttest_requests.TestCaseInsensitiveDict\n+\ttest_requests.UtilsTestCase\n+\ttest_requests.TestMorselToCookieExpires\n+\ttest_requests.TestMorselToCookieMaxAge\n+\ttest_requests.TestTimeout\n+\ttest_requests.SendCall\n+\ttest_requests.RedirectSession\n+\ttest_requests.TestRedirects\n+\ttest_requests.list_of_tuples\n+\ttest_requests.test_data_argument_accepts_tuples\n+\ttest_requests.assert_copy\n+\ttest_requests.test_prepared_request_empty_copy\n+\ttest_requests.test_prepared_request_no_cookies_copy\n+\ttest_requests.test_prepared_request_complete_copy\n+\ttest_requests.test_prepare_unicode_url\n+Defined in file: /psf__requests/test_requests.py\n\\ No newline at end of file\ndiff --git a/requests/sessions.py b/requests/sessions.py\nindex d701ff2e..696285e1 100644\n--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -425,7 +425,10 @@ class Session(SessionRedirectMixin):\n If Tuple, ('cert', 'key') pair.\n \"\"\"\n \n- method = builtin_str(method)\n+ if isinstance(method, bytes):\n+ method = method.decode('ascii')\n+ else:\n+ method = builtin_str(method)\n \n # Create the Request.\n req = Request(\n" - } - ], - "alternative_spans": [ + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__" + ] + } + }, { - "run_name": "20240402_sweagent_gpt4", - "spans": { - "requests/sessions.py": [ - "Session.request" + "name": "20240622_Lingma_Agent", + "updated_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + 
}, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" ] } }, { - "run_name": "20240523_aider", - "spans": { - "requests/sessions.py": [ - "Session.request" + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__" ] } }, { - "run_name": "20240524_opencsg_starship_gpt4", - "spans": { - "requests/sessions.py": [ - "Session.request" + "name": "20240617_factory_code_droid", + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__" ] } }, { - "run_name": "20240530_autocoderover-v20240408", - "spans": { - "requests/sessions.py": [ - "Session.request" + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "requests/cookies.py": [ + "docstring", + "RequestsCookieJar" + ], + "requests/models.py": [ + "Request.__init__", + "Response.content" + ], + "requests/packages/urllib3/_collections.py": [ + "docstring" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__", + "Response.content" ] } }, { - "run_name": "20240612_IBM_Research_Agent101", - "spans": { + "name": "20231010_rag_swellama7b", + "updated_spans": { + "requests/hooks.py": [ + "dispatch_hook" + ], + "requests/models.py": [ + "Request.__init__" + ], + "requests/packages/oauthlib/oauth1/rfc5849/utils.py": [ + "filter_params" + ], "requests/sessions.py": [ "Session.request" ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__" + ] } - } - ] - }, - { - "instance_id": "psf__requests-2674", - "repo": "psf/requests", - "base_commit": "0be38a0c37c59c4b66ce908731da15b401655113", - "problem_statement": "urllib3 exceptions passing through requests API\nI don't know if it's a design goal of requests to hide urllib3's exceptions and wrap them around requests.exceptions types.\n\n(If it's not IMHO it should be, but that's another discussion)\n\nIf it is, I have at least two of them passing through that I have to catch in addition to requests' exceptions. 
They are requests.packages.urllib3.exceptions.DecodeError and requests.packages.urllib3.exceptions.TimeoutError (this one I get when a proxy timeouts)\n\nThanks!\n\n", - "golden_patch": "diff --git a/requests/adapters.py b/requests/adapters.py\n--- a/requests/adapters.py\n+++ b/requests/adapters.py\n@@ -19,6 +19,7 @@\n from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,\n prepend_scheme_if_needed, get_auth_from_url, urldefragauth)\n from .structures import CaseInsensitiveDict\n+from .packages.urllib3.exceptions import ClosedPoolError\n from .packages.urllib3.exceptions import ConnectTimeoutError\n from .packages.urllib3.exceptions import HTTPError as _HTTPError\n from .packages.urllib3.exceptions import MaxRetryError\n@@ -421,6 +422,9 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox\n \n raise ConnectionError(e, request=request)\n \n+ except ClosedPoolError as e:\n+ raise ConnectionError(e, request=request)\n+\n except _ProxyError as e:\n raise ProxyError(e)\n \n", - "expected_spans": { - "requests/adapters.py": [ - "imports:5", - "HTTPAdapter.send" - ] - }, - "resolved_by": [ + }, { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/reproduce.py b/reproduce.py\nnew file mode 100644\nindex 00000000..92551964\n--- /dev/null\n+++ b/reproduce.py\n@@ -0,0 +1,19 @@\n+import requests\n+\n+# Test DecodeError\n+try:\n+ r = requests.get('http://example.com/invalid')\n+except requests.exceptions.RequestException as e:\n+ print(f\"Caught RequestException: {type(e).__name__}\")\n+except Exception as e:\n+ print(f\"Caught unexpected exception: {type(e).__name__}\")\n+\n+# Test TimeoutError \n+try:\n+ r = requests.get('http://10.255.255.1', timeout=1)\n+except requests.exceptions.RequestException as e:\n+ print(f\"Caught RequestException: {type(e).__name__}\")\n+except Exception as e:\n+ print(f\"Caught unexpected exception: {type(e).__name__}\")\n+\n+print(\"Script completed\")\n" + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + } }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/reproduce_exceptions.py b/reproduce_exceptions.py\nnew file mode 100644\nindex 00000000..14ac0dfa\n--- /dev/null\n+++ b/reproduce_exceptions.py\n@@ -0,0 +1,11 @@\n+import requests\n+from requests.packages.urllib3.exceptions import ReadTimeoutError\n+\n+# Attempt to trigger ReadTimeoutError\n+try:\n+ # This request should timeout quickly to trigger a ReadTimeoutError\n+ r = requests.get('http://httpbin.org/delay/3', timeout=3)\n+except ReadTimeoutError as e:\n+ print(\"Caught ReadTimeoutError from urllib3:\", e)\n+\n+print(\"Script completed successfully, no errors.\")\ndiff --git a/requests/adapters.py b/requests/adapters.py\nindex 841cbbfe..c85c5572 100644\n--- a/requests/adapters.py\n+++ b/requests/adapters.py\n@@ -24,7 +24,7 @@ from .packages.urllib3.exceptions import HTTPError as _HTTPError\n from .packages.urllib3.exceptions import MaxRetryError\n from .packages.urllib3.exceptions import ProxyError as _ProxyError\n from .packages.urllib3.exceptions import ProtocolError\n-from .packages.urllib3.exceptions import ReadTimeoutError\n+from .packages.urllib3.exceptions import ReadTimeoutError, DecodeError\n from .packages.urllib3.exceptions import SSLError as _SSLError\n from .packages.urllib3.exceptions import ResponseError\n from .cookies import 
extract_cookies_to_jar\n" + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "requests/models.py": [ + "Request.__init__" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/requests/exceptions.py b/requests/exceptions.py\nindex 89135a80..0e2a54d9 100644\n--- a/requests/exceptions.py\n+++ b/requests/exceptions.py\n@@ -91,9 +91,20 @@ class ContentDecodingError(RequestException, BaseHTTPError):\n \"\"\"Failed to decode response content\"\"\"\n \n \n+from .packages.urllib3.exceptions import DecodeError as Urllib3DecodeError, TimeoutError as Urllib3TimeoutError\n+\n+\n class StreamConsumedError(RequestException, TypeError):\n \"\"\"The content for this response was already consumed\"\"\"\n \n \n class RetryError(RequestException):\n \"\"\"Custom retries logic failed\"\"\"\n+\n+\n+class DecodeError(RequestException, Urllib3DecodeError):\n+ \"\"\"Failed to decode response content\"\"\"\n+\n+\n+class TimeoutError(RequestException, Urllib3TimeoutError):\n+ \"\"\"The request timed out\"\"\"\n" + "name": "20240811_gru", + "updated_spans": { + "requests/hooks.py": [ + "docstring", + "dispatch_hook" + ] + }, + "alternative_spans": {} }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/requests/adapters.py\n+++ b/requests/adapters.py\n@@ -26,7 +26,8 @@\n from .packages.urllib3.exceptions import ProtocolError\n from .packages.urllib3.exceptions import ReadTimeoutError\n from .packages.urllib3.exceptions import SSLError as _SSLError\n-from .packages.urllib3.exceptions import ResponseError\n+from .packages.urllib3.exceptions import DecodeError, TimeoutError, ResponseError\n+from .exceptions import ContentDecodingError, Timeout\n from .cookies import extract_cookies_to_jar\n from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,\n ProxyError, RetryError)\n@@ -299,6 +300,12 @@\n \"\"\"\n pass\n \n+ def _wrap_urllib3_exceptions(self, e):\n+ if isinstance(e, DecodeError):\n+ raise ContentDecodingError(e)\n+ elif isinstance(e, TimeoutError):\n+ raise Timeout(e)\n+\n def proxy_headers(self, proxy):\n \"\"\"Returns a dictionary of the headers to add to any request sent\n through a proxy. 
This works with urllib3 magic to ensure that they are\n@@ -333,6 +340,7 @@\n :param verify: (optional) Whether to verify SSL certificates.\n :param cert: (optional) Any user-provided SSL certificate to be trusted.\n :param proxies: (optional) The proxies dictionary to apply to the request.\n+ try:\n \"\"\"\n \n conn = self.get_connection(request.url, proxies)\n@@ -432,4 +440,6 @@\n else:\n raise\n \n+ except (DecodeError, TimeoutError) as e:\n+ self._wrap_urllib3_exceptions(e)\n return self.build_response(request, resp)\n" + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ], + "tests/test_requests.py": [ + "RequestsTestSuite.test_session_persistent_cookies" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/reproduce.py b/reproduce.py\nnew file mode 100644\nindex 00000000..e2f27a18\n--- /dev/null\n+++ b/reproduce.py\n@@ -0,0 +1,17 @@\n+import requests\n+from requests.exceptions import RequestException\n+from requests.packages.urllib3.exceptions import DecodeError, TimeoutError\n+\n+try:\n+ # Simulate a request that could raise a DecodeError\n+ response = requests.get('http://example.com', timeout=0.001)\n+except DecodeError as e:\n+ print(\"Caught urllib3 DecodeError:\", e)\n+except TimeoutError as e:\n+ print(\"Caught urllib3 TimeoutError:\", e)\n+except RequestException as e:\n+ print(\"Caught requests RequestException:\", e)\n+except Exception as e:\n+ print(\"Caught unexpected exception:\", e)\n+else:\n+ print(\"Request completed successfully:\", response.status_code)\ndiff --git a/requests/adapters.py b/requests/adapters.py\nindex 841cbbfe..e302c37d 100644\n--- a/requests/adapters.py\n+++ b/requests/adapters.py\n@@ -19,17 +19,29 @@ from .compat import urlparse, basestring\n from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,\n prepend_scheme_if_needed, get_auth_from_url, urldefragauth)\n from .structures import CaseInsensitiveDict\n-from .packages.urllib3.exceptions import ConnectTimeoutError\n-from .packages.urllib3.exceptions import HTTPError as _HTTPError\n-from .packages.urllib3.exceptions import MaxRetryError\n-from .packages.urllib3.exceptions import ProxyError as _ProxyError\n-from .packages.urllib3.exceptions import ProtocolError\n-from .packages.urllib3.exceptions import ReadTimeoutError\n-from .packages.urllib3.exceptions import SSLError as _SSLError\n-from .packages.urllib3.exceptions import ResponseError\n+from .packages.urllib3.exceptions import (\n+ ConnectTimeoutError,\n+ DecodeError,\n+ HTTPError as _HTTPError,\n+ MaxRetryError,\n+ ProxyError as _ProxyError,\n+ ProtocolError,\n+ ReadTimeoutError,\n+ ResponseError,\n+ SSLError as _SSLError,\n+ TimeoutError\n+)\n from .cookies import extract_cookies_to_jar\n-from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,\n- ProxyError, RetryError)\n+from .exceptions import (\n+ ConnectionError,\n+ ConnectTimeout,\n+ ContentDecodingError,\n+ ReadTimeout,\n+ SSLError,\n+ ProxyError,\n+ RetryError,\n+ Timeout\n+)\n from .auth import _basic_auth_str\n \n DEFAULT_POOLBLOCK = False\n" + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + }, + "alternative_spans": { + "requests/models.py": [ + "Request.__init__", + "Request.register_hook" + ] + } } ], 
"alternative_spans": [ { - "run_name": "20240402_sweagent_gpt4", + "run_name": "20240524_opencsg_starship_gpt4", "spans": { - "requests/adapters.py": [ - "imports:11" + "requests/models.py": [ + "Request.__init__" ] } }, { - "run_name": "20240524_opencsg_starship_gpt4", + "run_name": "20240630_agentless_gpt4o", "spans": { - "requests/adapters.py": [ - "imports:13", - "HTTPAdapter.proxy_headers", - "HTTPAdapter.send", - "HTTPAdapter" + "requests/models.py": [ + "Request.__init__" ] } }, { - "run_name": "20240612_IBM_Research_Agent101", + "run_name": "20240530_autocoderover-v20240408", "spans": { - "requests/adapters.py": [ - "imports:5", - "impl:2", - "imports:10", - "impl:3", - "imports:11", - "impl:4", - "imports:13" + "requests/models.py": [ + "Request.__init__" ] } - } - ] - }, - { - "instance_id": "psf__requests-3362", - "repo": "psf/requests", - "base_commit": "36453b95b13079296776d11b09cab2567ea3e703", - "problem_statement": "Uncertain about content/text vs iter_content(decode_unicode=True/False)\nWhen requesting an application/json document, I'm seeing `next(r.iter_content(16*1024, decode_unicode=True))` returning bytes, whereas `r.text` returns unicode. My understanding was that both should return a unicode object. In essence, I thought \"iter_content\" was equivalent to \"iter_text\" when decode_unicode was True. Have I misunderstood something? I can provide an example if needed.\n\nFor reference, I'm using python 3.5.1 and requests 2.10.0.\n\nThanks!\n\n", - "golden_patch": "diff --git a/requests/utils.py b/requests/utils.py\n--- a/requests/utils.py\n+++ b/requests/utils.py\n@@ -358,13 +358,20 @@ def get_encoding_from_headers(headers):\n \n def stream_decode_response_unicode(iterator, r):\n \"\"\"Stream decodes a iterator.\"\"\"\n+ encoding = r.encoding\n \n- if r.encoding is None:\n- for item in iterator:\n- yield item\n- return\n+ if encoding is None:\n+ encoding = r.apparent_encoding\n+\n+ try:\n+ decoder = codecs.getincrementaldecoder(encoding)(errors='replace')\n+ except (LookupError, TypeError):\n+ # A LookupError is raised if the encoding was not found which could\n+ # indicate a misspelling or similar mistake.\n+ #\n+ # A TypeError can be raised if encoding is None\n+ raise UnicodeError(\"Unable to decode contents with encoding %s.\" % encoding)\n \n- decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')\n for chunk in iterator:\n rv = decoder.decode(chunk)\n if rv:\n", - "expected_spans": { - "requests/utils.py": [ - "stream_decode_response_unicode" - ] - }, - "resolved_by": [ + }, { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/requests/utils.py\n+++ b/requests/utils.py\n@@ -354,17 +354,20 @@\n \n if 'text' in content_type:\n return 'ISO-8859-1'\n \n \n def stream_decode_response_unicode(iterator, r):\n- \"\"\"Stream decodes a iterator.\"\"\"\n+ \"\"\"Stream decodes an iterator.\"\"\"\n \n if r.encoding is None:\n for item in iterator:\n- yield item\n+ if isinstance(item, bytes):\n+ yield item.decode('utf-8', errors='replace')\n+ else:\n+ yield item\n return\n \n decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')\n for chunk in iterator:\n rv = decoder.decode(chunk)\n if rv:\n@@ -374,12 +377,14 @@\n yield rv\n \n \n def iter_slices(string, slice_length):\n \"\"\"Iterate over slices of a string.\"\"\"\n pos = 0\n+ if isinstance(string, str):\n+ string = string.encode('utf-8') \n while pos < len(string):\n yield string[pos:pos + slice_length]\n pos += slice_length\n \n \n def get_unicode_from_response(r):\n" 
+ "run_name": "20240706_sima_gpt4o", + "spans": { + "requests/models.py": [ + "Request.__init__" + ] + } }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/requests/utils.py\n+++ b/requests/utils.py\n@@ -359,12 +359,21 @@\n def stream_decode_response_unicode(iterator, r):\n \"\"\"Stream decodes a iterator.\"\"\"\n \n- if r.encoding is None:\n- for item in iterator:\n- yield item\n- return\n-\n- decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')\n+ encoding = r.encoding or 'utf-8' # Fallback to utf-8 if r.encoding is None or invalid\n+\n+ try:\n+ decoder = codecs.getincrementaldecoder(encoding)(errors='replace')\n+ except LookupError: # Handle invalid encodings by falling back to utf-8\n+ encoding = 'utf-8'\n+ decoder = codecs.getincrementaldecoder(encoding)(errors='replace')\n+\n+ for chunk in iterator:\n+ rv = decoder.decode(chunk)\n+ if rv:\n+ yield rv\n+ rv = decoder.decode(b'', final=True)\n+ if rv:\n+ yield rv\n for chunk in iterator:\n rv = decoder.decode(chunk)\n if rv:\n" + "run_name": "20240623_moatless_claude35sonnet", + "spans": { + "requests/models.py": [ + "Request.__init__" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/requests/models.py b/requests/models.py\nindex fbb3c7e6..8cb86125 100644\n--- a/requests/models.py\n+++ b/requests/models.py\n@@ -695,6 +695,8 @@ class Response(object):\n chunks = reused_chunks if self._content_consumed else stream_chunks\n \n if decode_unicode:\n+ if self.encoding is None:\n+ self.encoding = self.apparent_encoding or 'utf-8'\n chunks = stream_decode_response_unicode(chunks, self)\n \n return chunks\n" + "run_name": "20240829_Isoform", + "spans": { + "requests/models.py": [ + "Request.__init__" + ] + } }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/requests/utils.py b/requests/utils.py\nindex 8d17b6b2..69327170 100644\n--- a/requests/utils.py\n+++ b/requests/utils.py\n@@ -356,16 +356,18 @@ def get_encoding_from_headers(headers):\n return 'ISO-8859-1'\n \n \n+import codecs\n+\n def stream_decode_response_unicode(iterator, r):\n \"\"\"Stream decodes a iterator.\"\"\"\n-\n- if r.encoding is None:\n- for item in iterator:\n- yield item\n- return\n-\n- decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')\n+ # Ensure we have a valid encoding to decode with.\n+ encoding = r.encoding if r.encoding is not None else 'utf-8'\n+ \n+ decoder = codecs.getincrementaldecoder(encoding)(errors='replace')\n for chunk in iterator:\n+ # Ensure chunk is bytes, if it's already text, encode it first (shouldn't normally happen).\n+ if isinstance(chunk, str):\n+ chunk = chunk.encode(encoding)\n rv = decoder.decode(chunk)\n if rv:\n yield rv\n" + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "requests/models.py": [ + "Request.__init__" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/jedi_info_requests.models.Response.txt b/jedi_info_requests.models.Response.txt\nnew file mode 100644\nindex 00000000..505a5d54\n--- /dev/null\n+++ b/jedi_info_requests.models.Response.txt\n@@ -0,0 +1,334 @@\n+Could not find information for requests.models.Response.stream_decode_response_unicode\n+Trying requests.models.Response\n+********************************************************************************\n+Information about requests.models.Response\n+Defined in file: /psf__requests/requests/models.py\n+Definition starts at line: 536\n+Definition ends at line: 861\n+Definition:\n+class Response(object):\n+ \"\"\"The 
:class:`Response ` object, which contains a\n+ server's response to an HTTP request.\n+ \"\"\"\n+\n+ __attrs__ = [\n+ '_content', 'status_code', 'headers', 'url', 'history',\n+ 'encoding', 'reason', 'cookies', 'elapsed', 'request'\n+ ]\n+\n+ def __init__(self):\n+ super(Response, self).__init__()\n+\n+ self._content = False\n+ self._content_consumed = False\n+\n+ #: Integer Code of responded HTTP Status, e.g. 404 or 200.\n+ self.status_code = None\n+\n+ #: Case-insensitive Dictionary of Response Headers.\n+ #: For example, ``headers['content-encoding']`` will return the\n+ #: value of a ``'Content-Encoding'`` response header.\n+ self.headers = CaseInsensitiveDict()\n+\n+ #: File-like object representation of response (for advanced usage).\n+ #: Use of ``raw`` requires that ``stream=True`` be set on the request.\n+ # This requirement does not apply for use internally to Requests.\n+ self.raw = None\n+\n+ #: Final URL location of Response.\n+ self.url = None\n+\n+ #: Encoding to decode with when accessing r.text.\n+ self.encoding = None\n+\n+ #: A list of :class:`Response ` objects from\n+ #: the history of the Request. Any redirect responses will end\n+ #: up here. The list is sorted from the oldest to the most recent request.\n+ self.history = []\n+\n+ #: Textual reason of responded HTTP Status, e.g. \"Not Found\" or \"OK\".\n+ self.reason = None\n+\n+ #: A CookieJar of Cookies the server sent back.\n+ self.cookies = cookiejar_from_dict({})\n+\n+ #: The amount of time elapsed between sending the request\n+ #: and the arrival of the response (as a timedelta).\n+ #: This property specifically measures the time taken between sending\n+ #: the first byte of the request and finishing parsing the headers. It\n+ #: is therefore unaffected by consuming the response content or the\n+ #: value of the ``stream`` keyword argument.\n+ self.elapsed = datetime.timedelta(0)\n+\n+ #: The :class:`PreparedRequest ` object to which this\n+ #: is a response.\n+ self.request = None\n+\n+ def __getstate__(self):\n+ # Consume everything; accessing the content attribute makes\n+ # sure the content has been fully read.\n+ if not self._content_consumed:\n+ self.content\n+\n+ return dict(\n+ (attr, getattr(self, attr, None))\n+ for attr in self.__attrs__\n+ )\n+\n+ def __setstate__(self, state):\n+ for name, value in state.items():\n+ setattr(self, name, value)\n+\n+ # pickled objects do not have .raw\n+ setattr(self, '_content_consumed', True)\n+ setattr(self, 'raw', None)\n+\n+ def __repr__(self):\n+ return '' % (self.status_code)\n+\n+ def __bool__(self):\n+ \"\"\"Returns true if :attr:`status_code` is 'OK'.\"\"\"\n+ return self.ok\n+\n+ def __nonzero__(self):\n+ \"\"\"Returns true if :attr:`status_code` is 'OK'.\"\"\"\n+ return self.ok\n+\n+ def __iter__(self):\n+ \"\"\"Allows you to use a response as an iterator.\"\"\"\n+ return self.iter_content(128)\n+\n+ @property\n+ def ok(self):\n+ try:\n+ self.raise_for_status()\n+ except HTTPError:\n+ return False\n+ return True\n+\n+ @property\n+ def is_redirect(self):\n+ \"\"\"True if this Response is a well-formed HTTP redirect that could have\n+ been processed automatically (by :meth:`Session.resolve_redirects`).\n+ \"\"\"\n+ return ('location' in self.headers and self.status_code in REDIRECT_STATI)\n+\n+ @property\n+ def is_permanent_redirect(self):\n+ \"\"\"True if this Response one of the permanent versions of redirect\"\"\"\n+ return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))\n+\n+ @property\n+ def 
apparent_encoding(self):\n+ \"\"\"The apparent encoding, provided by the chardet library\"\"\"\n+ return chardet.detect(self.content)['encoding']\n+\n+ def iter_content(self, chunk_size=1, decode_unicode=False):\n+ \"\"\"Iterates over the response data. When stream=True is set on the\n+ request, this avoids reading the content at once into memory for\n+ large responses. The chunk size is the number of bytes it should\n+ read into memory. This is not necessarily the length of each item\n+ returned as decoding can take place.\n+\n+ If decode_unicode is True, content will be decoded using the best\n+ available encoding based on the response.\n+ \"\"\"\n+\n+ def generate():\n+ # Special case for urllib3.\n+ if hasattr(self.raw, 'stream'):\n+ try:\n+ for chunk in self.raw.stream(chunk_size, decode_content=True):\n+ yield chunk\n+ except ProtocolError as e:\n+ raise ChunkedEncodingError(e)\n+ except DecodeError as e:\n+ raise ContentDecodingError(e)\n+ except ReadTimeoutError as e:\n+ raise ConnectionError(e)\n+ else:\n+ # Standard file-like object.\n+ while True:\n+ chunk = self.raw.read(chunk_size)\n+ if not chunk:\n+ break\n+ yield chunk\n+\n+ self._content_consumed = True\n+\n+ if self._content_consumed and isinstance(self._content, bool):\n+ raise StreamConsumedError()\n+ elif not isinstance(chunk_size, int):\n+ raise TypeError(\"chunk_size must be an int, it is instead a %s.\" % type(chunk_size))\n+ # simulate reading small chunks of the content\n+ reused_chunks = iter_slices(self._content, chunk_size)\n+\n+ stream_chunks = generate()\n+\n+ chunks = reused_chunks if self._content_consumed else stream_chunks\n+\n+ if decode_unicode:\n+ chunks = stream_decode_response_unicode(chunks, self)\n+\n+ return chunks\n+\n+ def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):\n+ \"\"\"Iterates over the response data, one line at a time. When\n+ stream=True is set on the request, this avoids reading the\n+ content at once into memory for large responses.\n+\n+ .. note:: This method is not reentrant safe.\n+ \"\"\"\n+\n+ pending = None\n+\n+ for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):\n+\n+ if pending is not None:\n+ chunk = pending + chunk\n+\n+ if delimiter:\n+ lines = chunk.split(delimiter)\n+ else:\n+ lines = chunk.splitlines()\n+\n+ if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:\n+ pending = lines.pop()\n+ else:\n+ pending = None\n+\n+ for line in lines:\n+ yield line\n+\n+ if pending is not None:\n+ yield pending\n+\n+ @property\n+ def content(self):\n+ \"\"\"Content of the response, in bytes.\"\"\"\n+\n+ if self._content is False:\n+ # Read the contents.\n+ try:\n+ if self._content_consumed:\n+ raise RuntimeError(\n+ 'The content for this response was already consumed')\n+\n+ if self.status_code == 0:\n+ self._content = None\n+ else:\n+ self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()\n+\n+ except AttributeError:\n+ self._content = None\n+\n+ self._content_consumed = True\n+ # don't need to release the connection; that's been handled by urllib3\n+ # since we exhausted the data.\n+ return self._content\n+\n+ @property\n+ def text(self):\n+ \"\"\"Content of the response, in unicode.\n+\n+ If Response.encoding is None, encoding will be guessed using\n+ ``chardet``.\n+\n+ The encoding of the response content is determined based solely on HTTP\n+ headers, following RFC 2616 to the letter. 
If you can take advantage of\n+ non-HTTP knowledge to make a better guess at the encoding, you should\n+ set ``r.encoding`` appropriately before accessing this property.\n+ \"\"\"\n+\n+ # Try charset from content-type\n+ content = None\n+ encoding = self.encoding\n+\n+ if not self.content:\n+ return str('')\n+\n+ # Fallback to auto-detected encoding.\n+ if self.encoding is None:\n+ encoding = self.apparent_encoding\n+\n+ # Decode unicode from given encoding.\n+ try:\n+ content = str(self.content, encoding, errors='replace')\n+ except (LookupError, TypeError):\n+ # A LookupError is raised if the encoding was not found which could\n+ # indicate a misspelling or similar mistake.\n+ #\n+ # A TypeError can be raised if encoding is None\n+ #\n+ # So we try blindly encoding.\n+ content = str(self.content, errors='replace')\n+\n+ return content\n+\n+ def json(self, **kwargs):\n+ \"\"\"Returns the json-encoded content of a response, if any.\n+\n+ :param \\*\\*kwargs: Optional arguments that ``json.loads`` takes.\n+ \"\"\"\n+\n+ if not self.encoding and self.content and len(self.content) > 3:\n+ # No encoding set. JSON RFC 4627 section 3 states we should expect\n+ # UTF-8, -16 or -32. Detect which one to use; If the detection or\n+ # decoding fails, fall back to `self.text` (using chardet to make\n+ # a best guess).\n+ encoding = guess_json_utf(self.content)\n+ if encoding is not None:\n+ try:\n+ return complexjson.loads(\n+ self.content.decode(encoding), **kwargs\n+ )\n+ except UnicodeDecodeError:\n+ # Wrong UTF codec detected; usually because it's not UTF-8\n+ # but some other 8-bit codec. This is an RFC violation,\n+ # and the server didn't bother to tell us what codec *was*\n+ # used.\n+ pass\n+ return complexjson.loads(self.text, **kwargs)\n+\n+ @property\n+ def links(self):\n+ \"\"\"Returns the parsed header links of the response, if any.\"\"\"\n+\n+ header = self.headers.get('link')\n+\n+ # l = MultiDict()\n+ l = {}\n+\n+ if header:\n+ links = parse_header_links(header)\n+\n+ for link in links:\n+ key = link.get('rel') or link.get('url')\n+ l[key] = link\n+\n+ return l\n+\n+ def raise_for_status(self):\n+ \"\"\"Raises stored :class:`HTTPError`, if one occurred.\"\"\"\n+\n+ http_error_msg = ''\n+\n+ if 400 <= self.status_code < 500:\n+ http_error_msg = '%s Client Error: %s for url: %s' % (self.status_code, self.reason, self.url)\n+\n+ elif 500 <= self.status_code < 600:\n+ http_error_msg = '%s Server Error: %s for url: %s' % (self.status_code, self.reason, self.url)\n+\n+ if http_error_msg:\n+ raise HTTPError(http_error_msg, response=self)\n+\n+ def close(self):\n+ \"\"\"Releases the connection back to the pool. 
Once this method has been\n+ called the underlying ``raw`` object must not be accessed again.\n+\n+ *Note: Should not normally need to be called explicitly.*\n+ \"\"\"\n+ if not self._content_consumed:\n+ return self.raw.close()\n+\n+ return self.raw.release_conn()\ndiff --git a/requests/utils.py b/requests/utils.py\nindex 8d17b6b2..6393e972 100644\n--- a/requests/utils.py\n+++ b/requests/utils.py\n@@ -360,9 +360,15 @@ def stream_decode_response_unicode(iterator, r):\n \"\"\"Stream decodes a iterator.\"\"\"\n \n if r.encoding is None:\n- for item in iterator:\n- yield item\n- return\n+ r.encoding = r.apparent_encoding\n+ decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')\n+ for chunk in iterator:\n+ rv = decoder.decode(chunk)\n+ if rv:\n+ yield rv\n+ rv = decoder.decode(b'', final=True)\n+ if rv:\n+ yield rv\n \n decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')\n for chunk in iterator:\ndiff --git a/tests/conftest.py b/tests/conftest.py\nindex af20e54d..3b7185b9 100644\n--- a/tests/conftest.py\n+++ b/tests/conftest.py\n@@ -14,10 +14,9 @@ def prepare_url(value):\n \n \n @pytest.fixture\n-def httpbin(httpbin):\n+def httpbin():\n return prepare_url(httpbin)\n \n-\n @pytest.fixture\n-def httpbin_secure(httpbin_secure):\n+def httpbin_secure():\n return prepare_url(httpbin_secure)\ndiff --git a/tests/test_utils.py b/tests/test_utils.py\nindex 17149d26..8a605bb0 100644\n--- a/tests/test_utils.py\n+++ b/tests/test_utils.py\n@@ -25,8 +25,8 @@ class TestSuperLen:\n 'stream, value', (\n (StringIO.StringIO, 'Test'),\n (BytesIO, b'Test'),\n- pytest.mark.skipif('cStringIO is None')((cStringIO, 'Test')),\n ))\n+ @pytest.mark.skipif(cStringIO is None, reason=\"cStringIO is None\")\n def test_io_streams(self, stream, value):\n \"\"\"Ensures that we properly deal with different kinds of IO streams.\"\"\"\n assert super_len(stream()) == 0\n" + "run_name": "20241016_IBM-SWE-1.0", + "spans": { + "requests/models.py": [ + "Request.__init__" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "requests/models.py": [ + "Request.__init__" + ] + } + }, + { + "run_name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "requests/models.py": [ + "Request.__init__", + "Response.content" + ] + } + }, + { + "run_name": "20231010_rag_swellama7b", + "spans": { + "requests/models.py": [ + "Request.__init__" + ] + } + }, + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "requests/models.py": [ + "Request.__init__" + ] + } } - ], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-11400", - "repo": "sympy/sympy", - "base_commit": "8dcb12a6cf500e8738d6729ab954a261758f49ca", - "problem_statement": "ccode(sinc(x)) doesn't work\n```\nIn [30]: ccode(sinc(x))\nOut[30]: '// Not supported in C:\\n// sinc\\nsinc(x)'\n```\n\nI don't think `math.h` has `sinc`, but it could print\n\n```\nIn [38]: ccode(Piecewise((sin(theta)/theta, Ne(theta, 0)), (1, True)))\nOut[38]: '((Ne(theta, 0)) ? 
(\\n sin(theta)/theta\\n)\\n: (\\n 1\\n))'\n```\n\n", - "golden_patch": "diff --git a/sympy/printing/ccode.py b/sympy/printing/ccode.py\n--- a/sympy/printing/ccode.py\n+++ b/sympy/printing/ccode.py\n@@ -231,6 +231,20 @@ def _print_Symbol(self, expr):\n else:\n return name\n \n+ def _print_Relational(self, expr):\n+ lhs_code = self._print(expr.lhs)\n+ rhs_code = self._print(expr.rhs)\n+ op = expr.rel_op\n+ return (\"{0} {1} {2}\").format(lhs_code, op, rhs_code)\n+\n+ def _print_sinc(self, expr):\n+ from sympy.functions.elementary.trigonometric import sin\n+ from sympy.core.relational import Ne\n+ from sympy.functions import Piecewise\n+ _piecewise = Piecewise(\n+ (sin(expr.args[0]) / expr.args[0], Ne(expr.args[0], 0)), (1, True))\n+ return self._print(_piecewise)\n+\n def _print_AugmentedAssignment(self, expr):\n lhs_code = self._print(expr.lhs)\n op = expr.rel_op\n", - "expected_spans": { - "sympy/printing/ccode.py": [ - "CCodePrinter._print_AugmentedAssignment" - ] - }, - "resolved_by": [], - "alternative_spans": [] + ] }, { - "instance_id": "sympy__sympy-11870", - "repo": "sympy/sympy", - "base_commit": "5c2e1f96a7ff562d4a778f4ca9ffc9c81557197e", - "problem_statement": "simplifying exponential -> trig identities\n```\r\nf = 1 / 2 * (-I*exp(I*k) + I*exp(-I*k))\r\ntrigsimp(f)\r\n```\r\n\r\nIdeally, this would yield `sin(k)`. Is there a way to do this?\r\n\r\nAs a corollary, it would be awesome if \r\n\r\n```\r\nf = 1 / 2 / k* (-I*exp(I*k) + I*exp(-I*k))\r\ntrigsimp(f)\r\n```\r\n\r\ncould yield `sinc(k)`. Thank you for your consideration!\n", - "golden_patch": "diff --git a/sympy/functions/elementary/trigonometric.py b/sympy/functions/elementary/trigonometric.py\n--- a/sympy/functions/elementary/trigonometric.py\n+++ b/sympy/functions/elementary/trigonometric.py\n@@ -16,6 +16,8 @@\n from sympy.sets.sets import FiniteSet\n from sympy.utilities.iterables import numbered_symbols\n from sympy.core.compatibility import range\n+from sympy.core.relational import Ne\n+from sympy.functions.elementary.piecewise import Piecewise\n \n ###############################################################################\n ########################## TRIGONOMETRIC FUNCTIONS ############################\n@@ -400,6 +402,9 @@ def _eval_rewrite_as_csc(self, arg):\n def _eval_rewrite_as_sec(self, arg):\n return 1 / sec(arg - S.Pi / 2, evaluate=False)\n \n+ def _eval_rewrite_as_sinc(self, arg):\n+ return arg*sinc(arg)\n+\n def _eval_conjugate(self):\n return self.func(self.args[0].conjugate())\n \n@@ -1789,7 +1794,7 @@ def _eval_rewrite_as_jn(self, arg):\n return jn(0, arg)\n \n def _eval_rewrite_as_sin(self, arg):\n- return sin(arg) / arg\n+ return Piecewise((sin(arg)/arg, Ne(arg, 0)), (1, True))\n \n \n ###############################################################################\n", + "instance_id": "psf__requests-1963", + "repo": "psf/requests", + "base_commit": "110048f9837f8441ea536804115e80b69f400277", + "problem_statement": "`Session.resolve_redirects` copies the original request for all subsequent requests, can cause incorrect method selection\nConsider the following redirection chain:\n\n```\nPOST /do_something HTTP/1.1\nHost: server.example.com\n...\n\nHTTP/1.1 303 See Other\nLocation: /new_thing_1513\n\nGET /new_thing_1513\nHost: server.example.com\n...\n\nHTTP/1.1 307 Temporary Redirect\nLocation: //failover.example.com/new_thing_1513\n```\n\nThe intermediate 303 See Other has caused the POST to be converted to\na GET. The subsequent 307 should preserve the GET. 
However, because\n`Session.resolve_redirects` starts each iteration by copying the _original_\nrequest object, Requests will issue a POST!\n\n", + "golden_patch": "diff --git a/requests/sessions.py b/requests/sessions.py\n--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -168,8 +168,11 @@ def resolve_redirects(self, resp, req, stream=False, timeout=None,\n if new_auth is not None:\n prepared_request.prepare_auth(new_auth)\n \n+ # Override the original request.\n+ req = prepared_request\n+\n resp = self.send(\n- prepared_request,\n+ req,\n stream=stream,\n timeout=timeout,\n verify=verify,\n", + "test_patch": "diff --git a/test_requests.py b/test_requests.py\n--- a/test_requests.py\n+++ b/test_requests.py\n@@ -8,6 +8,7 @@\n import os\n import pickle\n import unittest\n+import collections\n \n import requests\n import pytest\n@@ -18,6 +19,7 @@\n from requests.cookies import cookiejar_from_dict, morsel_to_cookie\n from requests.exceptions import InvalidURL, MissingSchema\n from requests.structures import CaseInsensitiveDict\n+from requests.sessions import SessionRedirectMixin\n \n try:\n import StringIO\n@@ -1187,5 +1189,64 @@ def test_stream_timeout(self):\n assert 'Read timed out' in e.args[0].args[0]\n \n \n+SendCall = collections.namedtuple('SendCall', ('args', 'kwargs'))\n+\n+\n+class RedirectSession(SessionRedirectMixin):\n+ def __init__(self, order_of_redirects):\n+ self.redirects = order_of_redirects\n+ self.calls = []\n+ self.max_redirects = 30\n+ self.cookies = {}\n+ self.trust_env = False\n+\n+ def send(self, *args, **kwargs):\n+ self.calls.append(SendCall(args, kwargs))\n+ return self.build_response()\n+\n+ def build_response(self):\n+ request = self.calls[-1].args[0]\n+ r = requests.Response()\n+\n+ try:\n+ r.status_code = int(self.redirects.pop(0))\n+ except IndexError:\n+ r.status_code = 200\n+\n+ r.headers = CaseInsensitiveDict({'Location': '/'})\n+ r.raw = self._build_raw()\n+ r.request = request\n+ return r\n+\n+ def _build_raw(self):\n+ string = StringIO.StringIO('')\n+ setattr(string, 'release_conn', lambda *args: args)\n+ return string\n+\n+\n+class TestRedirects:\n+ default_keyword_args = {\n+ 'stream': False,\n+ 'verify': True,\n+ 'cert': None,\n+ 'timeout': None,\n+ 'allow_redirects': False,\n+ 'proxies': None,\n+ }\n+\n+ def test_requests_are_updated_each_time(self):\n+ session = RedirectSession([303, 307])\n+ prep = requests.Request('POST', 'http://httpbin.org/post').prepare()\n+ r0 = session.send(prep)\n+ assert r0.request.method == 'POST'\n+ assert session.calls[-1] == SendCall((r0.request,), {})\n+ redirect_generator = session.resolve_redirects(r0, prep)\n+ for response in redirect_generator:\n+ assert response.request.method == 'GET'\n+ send_call = SendCall((response.request,),\n+ TestRedirects.default_keyword_args)\n+ assert session.calls[-1] == send_call\n+\n+\n if __name__ == '__main__':\n unittest.main()\n", + "fail_to_pass": "[\"test_requests.py::RequestsTestCase::test_DIGESTAUTH_QUOTES_QOP_VALUE\", \"test_requests.py::RequestsTestCase::test_DIGESTAUTH_WRONG_HTTP_401_GET\", \"test_requests.py::RequestsTestCase::test_DIGEST_AUTH_RETURNS_COOKIE\", \"test_requests.py::RequestsTestCase::test_DIGEST_HTTP_200_OK_GET\", \"test_requests.py::RequestsTestCase::test_POSTBIN_GET_POST_FILES_WITH_DATA\", \"test_requests.py::RequestsTestCase::test_param_cookiejar_works\", \"test_requests.py::TestRedirects::test_requests_are_updated_each_time\"]", + "pass_to_pass": "[\"test_requests.py::RequestsTestCase::test_BASICAUTH_TUPLE_HTTP_200_OK_GET\", 
\"test_requests.py::RequestsTestCase::test_DIGEST_AUTH_SETS_SESSION_COOKIES\", \"test_requests.py::RequestsTestCase::test_DIGEST_STREAM\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_ALTERNATIVE\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_WITH_MIXED_PARAMS\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_WITH_PARAMS\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_HEAD\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_PUT\", \"test_requests.py::RequestsTestCase::test_HTTP_302_ALLOW_REDIRECT_GET\", \"test_requests.py::RequestsTestCase::test_POSTBIN_GET_POST_FILES\", \"test_requests.py::RequestsTestCase::test_autoset_header_values_are_native\", \"test_requests.py::RequestsTestCase::test_basic_building\", \"test_requests.py::RequestsTestCase::test_basicauth_with_netrc\", \"test_requests.py::RequestsTestCase::test_can_send_nonstring_objects_with_files\", \"test_requests.py::RequestsTestCase::test_cannot_send_unprepared_requests\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_items\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keeps_items\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keeps_len\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keys\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_values\", \"test_requests.py::RequestsTestCase::test_cookie_parameters\", \"test_requests.py::RequestsTestCase::test_cookie_persists_via_api\", \"test_requests.py::RequestsTestCase::test_cookie_quote_wrapped\", \"test_requests.py::RequestsTestCase::test_cookie_removed_on_expire\", \"test_requests.py::RequestsTestCase::test_cookie_sent_on_redirect\", \"test_requests.py::RequestsTestCase::test_custom_content_type\", \"test_requests.py::RequestsTestCase::test_decompress_gzip\", \"test_requests.py::RequestsTestCase::test_different_encodings_dont_break_post\", \"test_requests.py::RequestsTestCase::test_entry_points\", \"test_requests.py::RequestsTestCase::test_fixes_1329\", \"test_requests.py::RequestsTestCase::test_generic_cookiejar_works\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_encoded_hashes\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_encoded_spaces\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_not_encoded_spaces\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_percent_chars\", \"test_requests.py::RequestsTestCase::test_header_keys_are_native\", \"test_requests.py::RequestsTestCase::test_header_remove_is_case_insensitive\", \"test_requests.py::RequestsTestCase::test_headers_on_session_with_None_are_not_sent\", \"test_requests.py::RequestsTestCase::test_hook_receives_request_arguments\", \"test_requests.py::RequestsTestCase::test_http_error\", \"test_requests.py::RequestsTestCase::test_invalid_url\", \"test_requests.py::RequestsTestCase::test_links\", \"test_requests.py::RequestsTestCase::test_long_authinfo_in_url\", \"test_requests.py::RequestsTestCase::test_no_content_length\", \"test_requests.py::RequestsTestCase::test_oddball_schemes_dont_check_URLs\", \"test_requests.py::RequestsTestCase::test_params_are_added_before_fragment\", \"test_requests.py::RequestsTestCase::test_params_are_merged_case_sensitive\", \"test_requests.py::RequestsTestCase::test_path_is_not_double_encoded\", \"test_requests.py::RequestsTestCase::test_prepared_from_session\", \"test_requests.py::RequestsTestCase::test_prepared_request_hook\", 
\"test_requests.py::RequestsTestCase::test_pyopenssl_redirect\", \"test_requests.py::RequestsTestCase::test_request_and_response_are_pickleable\", \"test_requests.py::RequestsTestCase::test_request_cookie_overrides_session_cookie\", \"test_requests.py::RequestsTestCase::test_request_cookies_not_persisted\", \"test_requests.py::RequestsTestCase::test_request_ok_set\", \"test_requests.py::RequestsTestCase::test_requests_in_history_are_not_overridden\", \"test_requests.py::RequestsTestCase::test_response_is_iterable\", \"test_requests.py::RequestsTestCase::test_session_hooks_are_overriden_by_request_hooks\", \"test_requests.py::RequestsTestCase::test_session_hooks_are_used_with_no_request_hooks\", \"test_requests.py::RequestsTestCase::test_session_pickling\", \"test_requests.py::RequestsTestCase::test_set_cookie_on_301\", \"test_requests.py::RequestsTestCase::test_status_raising\", \"test_requests.py::RequestsTestCase::test_time_elapsed_blank\", \"test_requests.py::RequestsTestCase::test_transport_adapter_ordering\", \"test_requests.py::RequestsTestCase::test_unicode_get\", \"test_requests.py::RequestsTestCase::test_unicode_header_name\", \"test_requests.py::RequestsTestCase::test_unicode_method_name\", \"test_requests.py::RequestsTestCase::test_unicode_multipart_post\", \"test_requests.py::RequestsTestCase::test_unicode_multipart_post_fieldnames\", \"test_requests.py::RequestsTestCase::test_uppercase_scheme_redirect\", \"test_requests.py::RequestsTestCase::test_urlencoded_get_query_multivalued_param\", \"test_requests.py::RequestsTestCase::test_user_agent_transfers\", \"test_requests.py::TestContentEncodingDetection::test_html4_pragma\", \"test_requests.py::TestContentEncodingDetection::test_html_charset\", \"test_requests.py::TestContentEncodingDetection::test_none\", \"test_requests.py::TestContentEncodingDetection::test_precedence\", \"test_requests.py::TestContentEncodingDetection::test_xhtml_pragma\", \"test_requests.py::TestContentEncodingDetection::test_xml\", \"test_requests.py::TestCaseInsensitiveDict::test_contains\", \"test_requests.py::TestCaseInsensitiveDict::test_delitem\", \"test_requests.py::TestCaseInsensitiveDict::test_docstring_example\", \"test_requests.py::TestCaseInsensitiveDict::test_equality\", \"test_requests.py::TestCaseInsensitiveDict::test_fixes_649\", \"test_requests.py::TestCaseInsensitiveDict::test_get\", \"test_requests.py::TestCaseInsensitiveDict::test_getitem\", \"test_requests.py::TestCaseInsensitiveDict::test_iter\", \"test_requests.py::TestCaseInsensitiveDict::test_iterable_init\", \"test_requests.py::TestCaseInsensitiveDict::test_kwargs_init\", \"test_requests.py::TestCaseInsensitiveDict::test_len\", \"test_requests.py::TestCaseInsensitiveDict::test_lower_items\", \"test_requests.py::TestCaseInsensitiveDict::test_mapping_init\", \"test_requests.py::TestCaseInsensitiveDict::test_preserve_key_case\", \"test_requests.py::TestCaseInsensitiveDict::test_preserve_last_key_case\", \"test_requests.py::TestCaseInsensitiveDict::test_setdefault\", \"test_requests.py::TestCaseInsensitiveDict::test_update\", \"test_requests.py::TestCaseInsensitiveDict::test_update_retains_unchanged\", \"test_requests.py::UtilsTestCase::test_address_in_network\", \"test_requests.py::UtilsTestCase::test_dotted_netmask\", \"test_requests.py::UtilsTestCase::test_get_auth_from_url\", \"test_requests.py::UtilsTestCase::test_get_environ_proxies\", \"test_requests.py::UtilsTestCase::test_get_environ_proxies_ip_ranges\", \"test_requests.py::UtilsTestCase::test_is_ipv4_address\", 
\"test_requests.py::UtilsTestCase::test_is_valid_cidr\", \"test_requests.py::UtilsTestCase::test_super_len_io_streams\", \"test_requests.py::TestMorselToCookieExpires::test_expires_invalid_int\", \"test_requests.py::TestMorselToCookieExpires::test_expires_invalid_str\", \"test_requests.py::TestMorselToCookieExpires::test_expires_none\", \"test_requests.py::TestMorselToCookieExpires::test_expires_valid_str\", \"test_requests.py::TestMorselToCookieMaxAge::test_max_age_invalid_str\", \"test_requests.py::TestMorselToCookieMaxAge::test_max_age_valid_int\", \"test_requests.py::TestTimeout::test_stream_timeout\"]", "expected_spans": { - "sympy/functions/elementary/trigonometric.py": [ - "sin._eval_rewrite_as_sqrt", - "sinc.eval" + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" ] }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-11897", - "repo": "sympy/sympy", - "base_commit": "e2918c1205c47345eb73c9be68b14c0f15fdeb17", - "problem_statement": "LaTeX printer inconsistent with pretty printer\nThe LaTeX printer should always give the same output as the pretty printer, unless better output is possible from LaTeX. In some cases it is inconsistent. For instance:\n\n``` py\nIn [9]: var('x', positive=True)\nOut[9]: x\n\nIn [10]: latex(exp(-x)*log(x))\nOut[10]: '\\\\frac{1}{e^{x}} \\\\log{\\\\left (x \\\\right )}'\n\nIn [11]: pprint(exp(-x)*log(x))\n -x\n\u212f \u22c5log(x)\n```\n\n(I also don't think the assumptions should affect printing). \n\n``` py\nIn [14]: var('x y')\nOut[14]: (x, y)\n\nIn [15]: latex(1/(x + y)/2)\nOut[15]: '\\\\frac{1}{2 x + 2 y}'\n\nIn [16]: pprint(1/(x + y)/2)\n 1\n\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n2\u22c5(x + y)\n```\n\n", - "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -235,10 +235,12 @@ def _needs_mul_brackets(self, expr, first=False, last=False):\n elif expr.is_Mul:\n if not first and _coeff_isneg(expr):\n return True\n+ if expr.is_Piecewise:\n+ return True\n if any([expr.has(x) for x in (Mod,)]):\n return True\n if (not last and\n- any([expr.has(x) for x in (Integral, Piecewise, Product, Sum)])):\n+ any([expr.has(x) for x in (Integral, Product, Sum)])):\n return True\n \n return False\n", - "expected_spans": { - "sympy/printing/latex.py": [ - "LatexPrinter._needs_mul_brackets" + "test_file_spans": { + "test_requests.py": [ + "imports", + "impl:9" ] }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-12171", - "repo": "sympy/sympy", - "base_commit": "ca6ef27272be31c9dc3753ede9232c39df9a75d8", - "problem_statement": "matematica code printer does not handle floats and derivatives correctly\nIn its current state the mathematica code printer does not handle Derivative(func(vars), deriver) \r\ne.g. Derivative(f(t), t) yields Derivative(f(t), t) instead of D[f[t],t]\r\n\r\nAlso floats with exponents are not handled correctly e.g. 
1.0e-4 is not converted to 1.0*^-4\r\n\r\nThis has an easy fix by adding the following lines to MCodePrinter:\r\n\r\n\r\ndef _print_Derivative(self, expr):\r\n return \"D[%s]\" % (self.stringify(expr.args, \", \"))\r\n\r\ndef _print_Float(self, expr):\r\n res =str(expr)\r\n return res.replace('e','*^') \r\n\r\n\r\n\n", - "golden_patch": "diff --git a/sympy/printing/mathematica.py b/sympy/printing/mathematica.py\n--- a/sympy/printing/mathematica.py\n+++ b/sympy/printing/mathematica.py\n@@ -109,6 +109,9 @@ def _print_Integral(self, expr):\n def _print_Sum(self, expr):\n return \"Hold[Sum[\" + ', '.join(self.doprint(a) for a in expr.args) + \"]]\"\n \n+ def _print_Derivative(self, expr):\n+ return \"Hold[D[\" + ', '.join(self.doprint(a) for a in expr.args) + \"]]\"\n+\n \n def mathematica_code(expr, **settings):\n r\"\"\"Converts an expr to a string of the Wolfram Mathematica code\n", - "expected_spans": { - "sympy/printing/mathematica.py": [] - }, - "resolved_by": [], + "resolved_by": [ + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "requests/models.py": [ + "RequestHooksMixin.register_hook" + ], + "requests/packages/urllib3/_collections.py": [ + "docstring" + ], + "requests/sessions.py": [ + "imports", + "SessionRedirectMixin.resolve_redirects" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "imports", + "SessionRedirectMixin.resolve_redirects" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects", + "SessionRedirectMixin" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects", + "SessionRedirectMixin" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "requests/sessions.py": [ + "docstring", + "imports", + "SessionRedirectMixin.resolve_redirects" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "docstring", + "imports", + "SessionRedirectMixin.resolve_redirects" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects", + "SessionRedirectMixin" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects", + "SessionRedirectMixin" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "requests/sessions.py": [ + 
"SessionRedirectMixin.resolve_redirects" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects", + "SessionRedirectMixin" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "SessionRedirectMixin.resolve_redirects", + "SessionRedirectMixin" + ] + } + } + ], "alternative_spans": [] }, { - "instance_id": "sympy__sympy-12236", - "repo": "sympy/sympy", - "base_commit": "d60497958f6dea7f5e25bc41e9107a6a63694d01", - "problem_statement": "Wrong result with apart\n```\r\nPython 3.6.0 |Continuum Analytics, Inc.| (default, Dec 23 2016, 12:22:00) \r\nType \"copyright\", \"credits\" or \"license\" for more information.\r\n\r\nIPython 5.1.0 -- An enhanced Interactive Python.\r\n? -> Introduction and overview of IPython's features.\r\n%quickref -> Quick reference.\r\nhelp -> Python's own help system.\r\nobject? -> Details about 'object', use 'object??' for extra details.\r\n\r\nIn [1]: from sympy import symbols\r\n\r\nIn [2]: a = symbols('a', real=True)\r\n\r\nIn [3]: t = symbols('t', real=True, negative=False)\r\n\r\nIn [4]: bug = a * (-t + (-t + 1) * (2 * t - 1)) / (2 * t - 1)\r\n\r\nIn [5]: bug.subs(a, 1)\r\nOut[5]: (-t + (-t + 1)*(2*t - 1))/(2*t - 1)\r\n\r\nIn [6]: bug.subs(a, 1).apart()\r\nOut[6]: -t + 1/2 - 1/(2*(2*t - 1))\r\n\r\nIn [7]: bug.subs(a, 1).apart(t)\r\nOut[7]: -t + 1/2 - 1/(2*(2*t - 1))\r\n\r\nIn [8]: bug.apart(t)\r\nOut[8]: -a*t\r\n\r\nIn [9]: import sympy; sympy.__version__\r\nOut[9]: '1.0'\r\n```\nWrong result with apart\n```\r\nPython 3.6.0 |Continuum Analytics, Inc.| (default, Dec 23 2016, 12:22:00) \r\nType \"copyright\", \"credits\" or \"license\" for more information.\r\n\r\nIPython 5.1.0 -- An enhanced Interactive Python.\r\n? -> Introduction and overview of IPython's features.\r\n%quickref -> Quick reference.\r\nhelp -> Python's own help system.\r\nobject? -> Details about 'object', use 'object??' for extra details.\r\n\r\nIn [1]: from sympy import symbols\r\n\r\nIn [2]: a = symbols('a', real=True)\r\n\r\nIn [3]: t = symbols('t', real=True, negative=False)\r\n\r\nIn [4]: bug = a * (-t + (-t + 1) * (2 * t - 1)) / (2 * t - 1)\r\n\r\nIn [5]: bug.subs(a, 1)\r\nOut[5]: (-t + (-t + 1)*(2*t - 1))/(2*t - 1)\r\n\r\nIn [6]: bug.subs(a, 1).apart()\r\nOut[6]: -t + 1/2 - 1/(2*(2*t - 1))\r\n\r\nIn [7]: bug.subs(a, 1).apart(t)\r\nOut[7]: -t + 1/2 - 1/(2*(2*t - 1))\r\n\r\nIn [8]: bug.apart(t)\r\nOut[8]: -a*t\r\n\r\nIn [9]: import sympy; sympy.__version__\r\nOut[9]: '1.0'\r\n```\n", - "golden_patch": "diff --git a/sympy/polys/domains/polynomialring.py b/sympy/polys/domains/polynomialring.py\n--- a/sympy/polys/domains/polynomialring.py\n+++ b/sympy/polys/domains/polynomialring.py\n@@ -104,10 +104,10 @@ def from_PolynomialRing(K1, a, K0):\n \n def from_FractionField(K1, a, K0):\n \"\"\"Convert a rational function to ``dtype``. 
\"\"\"\n- denom = K0.denom(a)\n+ q, r = K0.numer(a).div(K0.denom(a))\n \n- if denom.is_ground:\n- return K1.from_PolynomialRing(K0.numer(a)/denom, K0.field.ring.to_domain())\n+ if r.is_zero:\n+ return K1.from_PolynomialRing(q, K0.field.ring.to_domain())\n else:\n return None\n \n", + "instance_id": "psf__requests-2148", + "repo": "psf/requests", + "base_commit": "fe693c492242ae532211e0c173324f09ca8cf227", + "problem_statement": "socket.error exception not caught/wrapped in a requests exception (ConnectionError perhaps?)\nI just noticed a case where I had a socket reset on me, and was raised to me as a raw socket error as opposed to something like a requests.exceptions.ConnectionError:\n\n```\n File \"/home/rtdean/***/***/***/***/***/***.py\", line 67, in dir_parse\n root = ElementTree.fromstring(response.text)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/models.py\", line 721, in text\n if not self.content:\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/models.py\", line 694, in content\n self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/models.py\", line 627, in generate\n for chunk in self.raw.stream(chunk_size, decode_content=True):\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/packages/urllib3/response.py\", line 240, in stream\n data = self.read(amt=amt, decode_content=decode_content)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/requests-2.3.0-py2.7.egg/requests/packages/urllib3/response.py\", line 187, in read\n data = self._fp.read(amt)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/httplib.py\", line 543, in read\n return self._read_chunked(amt)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/httplib.py\", line 612, in _read_chunked\n value.append(self._safe_read(chunk_left))\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/httplib.py\", line 658, in _safe_read\n chunk = self.fp.read(min(amt, MAXAMOUNT))\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/socket.py\", line 380, in read\n data = self._sock.recv(left)\n File \"/home/rtdean/.pyenv/versions/2.7.6/lib/python2.7/site-packages/gevent-1.0.1-py2.7-linux-x86_64.egg/gevent/socket.py\", line 385, in recv\n return sock.recv(*args)\nsocket.error: [Errno 104] Connection reset by peer\n```\n\nNot sure if this is by accident or design... 
in general, I guess I'd expect a requests exception when using requests, but I can start looking for socket errors and the like as well.\n\n", + "golden_patch": "diff --git a/requests/models.py b/requests/models.py\n--- a/requests/models.py\n+++ b/requests/models.py\n@@ -9,6 +9,7 @@\n \n import collections\n import datetime\n+import socket\n \n from io import BytesIO, UnsupportedOperation\n from .hooks import default_hooks\n@@ -22,7 +23,7 @@\n from .packages.urllib3.exceptions import DecodeError\n from .exceptions import (\n HTTPError, RequestException, MissingSchema, InvalidURL,\n- ChunkedEncodingError, ContentDecodingError)\n+ ChunkedEncodingError, ContentDecodingError, ConnectionError)\n from .utils import (\n guess_filename, get_auth_from_url, requote_uri,\n stream_decode_response_unicode, to_key_val_list, parse_header_links,\n@@ -640,6 +641,8 @@ def generate():\n raise ChunkedEncodingError(e)\n except DecodeError as e:\n raise ContentDecodingError(e)\n+ except socket.error as e:\n+ raise ConnectionError(e)\n except AttributeError:\n # Standard file-like object.\n while True:\n", + "test_patch": "diff --git a/test_requests.py b/test_requests.py\n--- a/test_requests.py\n+++ b/test_requests.py\n@@ -18,7 +18,7 @@\n from requests.compat import (\n Morsel, cookielib, getproxies, str, urljoin, urlparse, is_py3, builtin_str)\n from requests.cookies import cookiejar_from_dict, morsel_to_cookie\n-from requests.exceptions import InvalidURL, MissingSchema\n+from requests.exceptions import InvalidURL, MissingSchema, ConnectionError\n from requests.models import PreparedRequest\n from requests.structures import CaseInsensitiveDict\n from requests.sessions import SessionRedirectMixin\n@@ -720,6 +720,18 @@ def read_mock(amt, decode_content=None):\n assert next(iter(r))\n io.close()\n \n+ def test_iter_content_handles_socket_error(self):\n+ r = requests.Response()\n+ import socket\n+\n+ class RawMock(object):\n+ def stream(self, chunk_size, decode_content=None):\n+ raise socket.error()\n+\n+ r.raw = RawMock()\n+ with pytest.raises(ConnectionError):\n+ list(r.iter_content())\n+\n def test_response_decode_unicode(self):\n \"\"\"\n When called with decode_unicode, Response.iter_content should always\n", + "fail_to_pass": "[\"test_requests.py::RequestsTestCase::test_DIGEST_AUTH_RETURNS_COOKIE\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_ALTERNATIVE\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_HEAD\", \"test_requests.py::RequestsTestCase::test_POSTBIN_GET_POST_FILES\", \"test_requests.py::RequestsTestCase::test_auth_is_stripped_on_redirect_off_host\", \"test_requests.py::RequestsTestCase::test_basicauth_with_netrc\", \"test_requests.py::RequestsTestCase::test_cookie_quote_wrapped\", \"test_requests.py::RequestsTestCase::test_generic_cookiejar_works\", \"test_requests.py::RequestsTestCase::test_iter_content_handles_socket_error\", \"test_requests.py::RequestsTestCase::test_unicode_multipart_post\"]", + "pass_to_pass": "[\"test_requests.py::RequestsTestCase::test_BASICAUTH_TUPLE_HTTP_200_OK_GET\", \"test_requests.py::RequestsTestCase::test_DIGESTAUTH_QUOTES_QOP_VALUE\", \"test_requests.py::RequestsTestCase::test_DIGESTAUTH_WRONG_HTTP_401_GET\", \"test_requests.py::RequestsTestCase::test_DIGEST_AUTH_SETS_SESSION_COOKIES\", \"test_requests.py::RequestsTestCase::test_DIGEST_STREAM\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_WITH_MIXED_PARAMS\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_WITH_PARAMS\", 
\"test_requests.py::RequestsTestCase::test_HTTP_200_OK_PUT\", \"test_requests.py::RequestsTestCase::test_HTTP_302_ALLOW_REDIRECT_GET\", \"test_requests.py::RequestsTestCase::test_auth_is_retained_for_redirect_on_host\", \"test_requests.py::RequestsTestCase::test_autoset_header_values_are_native\", \"test_requests.py::RequestsTestCase::test_basic_auth_str_is_always_native\", \"test_requests.py::RequestsTestCase::test_basic_building\", \"test_requests.py::RequestsTestCase::test_can_send_nonstring_objects_with_files\", \"test_requests.py::RequestsTestCase::test_cannot_send_unprepared_requests\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_items\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keeps_items\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keeps_len\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keys\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_values\", \"test_requests.py::RequestsTestCase::test_cookie_parameters\", \"test_requests.py::RequestsTestCase::test_cookie_persists_via_api\", \"test_requests.py::RequestsTestCase::test_cookie_removed_on_expire\", \"test_requests.py::RequestsTestCase::test_cookie_sent_on_redirect\", \"test_requests.py::RequestsTestCase::test_custom_content_type\", \"test_requests.py::RequestsTestCase::test_decompress_gzip\", \"test_requests.py::RequestsTestCase::test_different_encodings_dont_break_post\", \"test_requests.py::RequestsTestCase::test_entry_points\", \"test_requests.py::RequestsTestCase::test_fixes_1329\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_encoded_hashes\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_encoded_spaces\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_not_encoded_spaces\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_percent_chars\", \"test_requests.py::RequestsTestCase::test_header_keys_are_native\", \"test_requests.py::RequestsTestCase::test_header_remove_is_case_insensitive\", \"test_requests.py::RequestsTestCase::test_headers_on_session_with_None_are_not_sent\", \"test_requests.py::RequestsTestCase::test_history_is_always_a_list\", \"test_requests.py::RequestsTestCase::test_hook_receives_request_arguments\", \"test_requests.py::RequestsTestCase::test_http_error\", \"test_requests.py::RequestsTestCase::test_invalid_url\", \"test_requests.py::RequestsTestCase::test_links\", \"test_requests.py::RequestsTestCase::test_long_authinfo_in_url\", \"test_requests.py::RequestsTestCase::test_manual_redirect_with_partial_body_read\", \"test_requests.py::RequestsTestCase::test_mixed_case_scheme_acceptable\", \"test_requests.py::RequestsTestCase::test_no_content_length\", \"test_requests.py::RequestsTestCase::test_oddball_schemes_dont_check_URLs\", \"test_requests.py::RequestsTestCase::test_param_cookiejar_works\", \"test_requests.py::RequestsTestCase::test_params_are_added_before_fragment\", \"test_requests.py::RequestsTestCase::test_params_are_merged_case_sensitive\", \"test_requests.py::RequestsTestCase::test_path_is_not_double_encoded\", \"test_requests.py::RequestsTestCase::test_prepared_from_session\", \"test_requests.py::RequestsTestCase::test_prepared_request_hook\", \"test_requests.py::RequestsTestCase::test_pyopenssl_redirect\", \"test_requests.py::RequestsTestCase::test_redirect_with_wrong_gzipped_header\", \"test_requests.py::RequestsTestCase::test_request_and_response_are_pickleable\", 
\"test_requests.py::RequestsTestCase::test_request_cookies_not_persisted\", \"test_requests.py::RequestsTestCase::test_request_ok_set\", \"test_requests.py::RequestsTestCase::test_requests_in_history_are_not_overridden\", \"test_requests.py::RequestsTestCase::test_response_decode_unicode\", \"test_requests.py::RequestsTestCase::test_response_is_iterable\", \"test_requests.py::RequestsTestCase::test_session_hooks_are_overriden_by_request_hooks\", \"test_requests.py::RequestsTestCase::test_session_hooks_are_used_with_no_request_hooks\", \"test_requests.py::RequestsTestCase::test_session_pickling\", \"test_requests.py::RequestsTestCase::test_set_cookie_on_301\", \"test_requests.py::RequestsTestCase::test_status_raising\", \"test_requests.py::RequestsTestCase::test_time_elapsed_blank\", \"test_requests.py::RequestsTestCase::test_transport_adapter_ordering\", \"test_requests.py::RequestsTestCase::test_unicode_get\", \"test_requests.py::RequestsTestCase::test_unicode_header_name\", \"test_requests.py::RequestsTestCase::test_unicode_multipart_post_fieldnames\", \"test_requests.py::RequestsTestCase::test_uppercase_scheme_redirect\", \"test_requests.py::RequestsTestCase::test_urlencoded_get_query_multivalued_param\", \"test_requests.py::RequestsTestCase::test_user_agent_transfers\", \"test_requests.py::TestContentEncodingDetection::test_html4_pragma\", \"test_requests.py::TestContentEncodingDetection::test_html_charset\", \"test_requests.py::TestContentEncodingDetection::test_none\", \"test_requests.py::TestContentEncodingDetection::test_precedence\", \"test_requests.py::TestContentEncodingDetection::test_xhtml_pragma\", \"test_requests.py::TestContentEncodingDetection::test_xml\", \"test_requests.py::TestCaseInsensitiveDict::test_contains\", \"test_requests.py::TestCaseInsensitiveDict::test_delitem\", \"test_requests.py::TestCaseInsensitiveDict::test_docstring_example\", \"test_requests.py::TestCaseInsensitiveDict::test_equality\", \"test_requests.py::TestCaseInsensitiveDict::test_fixes_649\", \"test_requests.py::TestCaseInsensitiveDict::test_get\", \"test_requests.py::TestCaseInsensitiveDict::test_getitem\", \"test_requests.py::TestCaseInsensitiveDict::test_iter\", \"test_requests.py::TestCaseInsensitiveDict::test_iterable_init\", \"test_requests.py::TestCaseInsensitiveDict::test_kwargs_init\", \"test_requests.py::TestCaseInsensitiveDict::test_len\", \"test_requests.py::TestCaseInsensitiveDict::test_lower_items\", \"test_requests.py::TestCaseInsensitiveDict::test_mapping_init\", \"test_requests.py::TestCaseInsensitiveDict::test_preserve_key_case\", \"test_requests.py::TestCaseInsensitiveDict::test_preserve_last_key_case\", \"test_requests.py::TestCaseInsensitiveDict::test_setdefault\", \"test_requests.py::TestCaseInsensitiveDict::test_update\", \"test_requests.py::TestCaseInsensitiveDict::test_update_retains_unchanged\", \"test_requests.py::UtilsTestCase::test_address_in_network\", \"test_requests.py::UtilsTestCase::test_dotted_netmask\", \"test_requests.py::UtilsTestCase::test_get_auth_from_url\", \"test_requests.py::UtilsTestCase::test_get_environ_proxies\", \"test_requests.py::UtilsTestCase::test_get_environ_proxies_ip_ranges\", \"test_requests.py::UtilsTestCase::test_is_ipv4_address\", \"test_requests.py::UtilsTestCase::test_is_valid_cidr\", \"test_requests.py::UtilsTestCase::test_super_len_io_streams\", \"test_requests.py::TestMorselToCookieExpires::test_expires_invalid_int\", \"test_requests.py::TestMorselToCookieExpires::test_expires_invalid_str\", 
\"test_requests.py::TestMorselToCookieExpires::test_expires_none\", \"test_requests.py::TestMorselToCookieExpires::test_expires_valid_str\", \"test_requests.py::TestMorselToCookieMaxAge::test_max_age_invalid_str\", \"test_requests.py::TestMorselToCookieMaxAge::test_max_age_valid_int\", \"test_requests.py::TestTimeout::test_stream_timeout\", \"test_requests.py::TestRedirects::test_requests_are_updated_each_time\", \"test_requests.py::test_data_argument_accepts_tuples\", \"test_requests.py::test_prepared_request_empty_copy\", \"test_requests.py::test_prepared_request_no_cookies_copy\", \"test_requests.py::test_prepared_request_complete_copy\"]", "expected_spans": { - "sympy/polys/domains/polynomialring.py": [ - "PolynomialRing.from_FractionField" + "requests/models.py": [ + "imports", + "Response.iter_content" ] }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-12419", - "repo": "sympy/sympy", - "base_commit": "479939f8c65c8c2908bbedc959549a257a7c0b0b", - "problem_statement": "Sum of the elements of an identity matrix is zero\nI think this is a bug.\r\n\r\nI created a matrix by M.T * M under an assumption that M is orthogonal. SymPy successfully recognized that the result is an identity matrix. I tested its identity-ness by element-wise, queries, and sum of the diagonal elements and received expected results.\r\n\r\nHowever, when I attempt to evaluate the total sum of the elements the result was 0 while 'n' is expected.\r\n\r\n```\r\nfrom sympy import *\r\nfrom sympy import Q as Query\r\n\r\nn = Symbol('n', integer=True, positive=True)\r\ni, j = symbols('i j', integer=True)\r\nM = MatrixSymbol('M', n, n)\r\n\r\ne = None\r\nwith assuming(Query.orthogonal(M)):\r\n e = refine((M.T * M).doit())\r\n\r\n# Correct: M.T * M is an identity matrix.\r\nprint(e, e[0, 0], e[0, 1], e[1, 0], e[1, 1])\r\n\r\n# Correct: The output is True True\r\nprint(ask(Query.diagonal(e)), ask(Query.integer_elements(e)))\r\n\r\n# Correct: The sum of the diagonal elements is n\r\nprint(Sum(e[i, i], (i, 0, n-1)).doit())\r\n\r\n# So far so good\r\n# Total sum of the elements is expected to be 'n' but the answer is 0!\r\nprint(Sum(Sum(e[i, j], (i, 0, n-1)), (j, 0, n-1)).doit())\r\n```\n", - "golden_patch": "diff --git a/sympy/matrices/expressions/matexpr.py b/sympy/matrices/expressions/matexpr.py\n--- a/sympy/matrices/expressions/matexpr.py\n+++ b/sympy/matrices/expressions/matexpr.py\n@@ -2,11 +2,12 @@\n \n from functools import wraps\n \n-from sympy.core import S, Symbol, Tuple, Integer, Basic, Expr\n+from sympy.core import S, Symbol, Tuple, Integer, Basic, Expr, Eq\n from sympy.core.decorators import call_highest_priority\n from sympy.core.compatibility import range\n from sympy.core.sympify import SympifyError, sympify\n from sympy.functions import conjugate, adjoint\n+from sympy.functions.special.tensor_functions import KroneckerDelta\n from sympy.matrices import ShapeError\n from sympy.simplify import simplify\n \n@@ -375,7 +376,6 @@ def _eval_derivative(self, v):\n if self.args[0] != v.args[0]:\n return S.Zero\n \n- from sympy import KroneckerDelta\n return KroneckerDelta(self.args[1], v.args[1])*KroneckerDelta(self.args[2], v.args[2])\n \n \n@@ -476,10 +476,12 @@ def conjugate(self):\n return self\n \n def _entry(self, i, j):\n- if i == j:\n+ eq = Eq(i, j)\n+ if eq is S.true:\n return S.One\n- else:\n+ elif eq is S.false:\n return S.Zero\n+ return KroneckerDelta(i, j)\n \n def _eval_determinant(self):\n return S.One\n", - "expected_spans": { - 
"sympy/matrices/expressions/matexpr.py": [ + "test_file_spans": { + "test_requests.py": [ "imports", - "MatrixElement._eval_derivative", - "Identity._entry" + "RequestsTestCase.test_response_decode_unicode" ] }, "resolved_by": [], "alternative_spans": [] }, { - "instance_id": "sympy__sympy-12454", - "repo": "sympy/sympy", - "base_commit": "d3fcdb72bfcbb560eb45264ac1c03f359436edef", - "problem_statement": "is_upper() raises IndexError for tall matrices\nThe function Matrix.is_upper raises an IndexError for a 4x2 matrix of zeros.\r\n```\r\n>>> sympy.zeros(4,2).is_upper\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"sympy/matrices/matrices.py\", line 1112, in is_upper\r\n for i in range(1, self.rows)\r\n File \"sympy/matrices/matrices.py\", line 1113, in \r\n for j in range(i))\r\n File \"sympy/matrices/dense.py\", line 119, in __getitem__\r\n return self.extract(i, j)\r\n File \"sympy/matrices/matrices.py\", line 352, in extract\r\n colsList = [a2idx(k, self.cols) for k in colsList]\r\n File \"sympy/matrices/matrices.py\", line 5261, in a2idx\r\n raise IndexError(\"Index out of range: a[%s]\" % (j,))\r\nIndexError: Index out of range: a[2]\r\n```\r\nThe code for is_upper() is\r\n```\r\n return all(self[i, j].is_zero\r\n for i in range(1, self.rows)\r\n for j in range(i))\r\n```\r\nFor a 4x2 matrix, is_upper iterates over the indices:\r\n```\r\n>>> A = sympy.zeros(4, 2)\r\n>>> print tuple([i, j] for i in range(1, A.rows) for j in range(i))\r\n([1, 0], [2, 0], [2, 1], [3, 0], [3, 1], [3, 2])\r\n```\r\nThe attempt to index the (3,2) entry appears to be the source of the error. \n", - "golden_patch": "diff --git a/sympy/matrices/matrices.py b/sympy/matrices/matrices.py\n--- a/sympy/matrices/matrices.py\n+++ b/sympy/matrices/matrices.py\n@@ -641,7 +641,7 @@ def _eval_is_zero(self):\n def _eval_is_upper_hessenberg(self):\n return all(self[i, j].is_zero\n for i in range(2, self.rows)\n- for j in range(i - 1))\n+ for j in range(min(self.cols, (i - 1))))\n \n def _eval_values(self):\n return [i for i in self if not i.is_zero]\n@@ -1112,7 +1112,7 @@ def is_upper(self):\n \"\"\"\n return all(self[i, j].is_zero\n for i in range(1, self.rows)\n- for j in range(i))\n+ for j in range(min(i, self.cols)))\n \n @property\n def is_zero(self):\n", + "instance_id": "psf__requests-2317", + "repo": "psf/requests", + "base_commit": "091991be0da19de9108dbe5e3752917fea3d7fdc", + "problem_statement": "method = builtin_str(method) problem\nIn requests/sessions.py is a command:\n\nmethod = builtin_str(method)\nConverts method from\nb\u2019GET\u2019\nto\n\"b'GET\u2019\"\n\nWhich is the literal string, no longer a binary string. When requests tries to use the method \"b'GET\u2019\u201d, it gets a 404 Not Found response.\n\nI am using python3.4 and python-neutronclient (2.3.9) with requests (2.4.3). neutronclient is broken because it uses this \"args = utils.safe_encode_list(args)\" command which converts all the values to binary string, including method.\n\nI'm not sure if this is a bug with neutronclient or a bug with requests, but I'm starting here. Seems if requests handled the method value being a binary string, we wouldn't have any problem.\n\nAlso, I tried in python2.6 and this bug doesn't exist there. 
Some difference between 2.6 and 3.4 makes this not work right.\n\n", + "golden_patch": "diff --git a/requests/sessions.py b/requests/sessions.py\n--- a/requests/sessions.py\n+++ b/requests/sessions.py\n@@ -13,7 +13,7 @@\n from datetime import datetime\n \n from .auth import _basic_auth_str\n-from .compat import cookielib, OrderedDict, urljoin, urlparse, builtin_str\n+from .compat import cookielib, OrderedDict, urljoin, urlparse\n from .cookies import (\n cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)\n from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT\n@@ -425,7 +425,7 @@ def request(self, method, url,\n If Tuple, ('cert', 'key') pair.\n \"\"\"\n \n- method = builtin_str(method)\n+ method = to_native_string(method)\n \n # Create the Request.\n req = Request(\n", + "test_patch": "diff --git a/test_requests.py b/test_requests.py\n--- a/test_requests.py\n+++ b/test_requests.py\n@@ -1389,6 +1389,11 @@ def test_total_timeout_connect(self):\n except ConnectTimeout:\n pass\n \n+ def test_encoded_methods(self):\n+ \"\"\"See: https://github.com/kennethreitz/requests/issues/2316\"\"\"\n+ r = requests.request(b'GET', httpbin('get'))\n+ assert r.ok\n+\n \n SendCall = collections.namedtuple('SendCall', ('args', 'kwargs'))\n \n", + "fail_to_pass": "[\"test_requests.py::RequestsTestCase::test_HTTP_302_ALLOW_REDIRECT_GET\", \"test_requests.py::RequestsTestCase::test_POSTBIN_GET_POST_FILES\", \"test_requests.py::RequestsTestCase::test_POSTBIN_GET_POST_FILES_WITH_DATA\", \"test_requests.py::RequestsTestCase::test_basicauth_with_netrc\", \"test_requests.py::RequestsTestCase::test_json_param_post_content_type_works\", \"test_requests.py::RequestsTestCase::test_manual_redirect_with_partial_body_read\", \"test_requests.py::RequestsTestCase::test_requests_history_is_saved\", \"test_requests.py::TestTimeout::test_encoded_methods\"]", + "pass_to_pass": "[\"test_requests.py::RequestsTestCase::test_BASICAUTH_TUPLE_HTTP_200_OK_GET\", \"test_requests.py::RequestsTestCase::test_DIGESTAUTH_QUOTES_QOP_VALUE\", \"test_requests.py::RequestsTestCase::test_DIGESTAUTH_WRONG_HTTP_401_GET\", \"test_requests.py::RequestsTestCase::test_DIGEST_AUTH_RETURNS_COOKIE\", \"test_requests.py::RequestsTestCase::test_DIGEST_AUTH_SETS_SESSION_COOKIES\", \"test_requests.py::RequestsTestCase::test_DIGEST_HTTP_200_OK_GET\", \"test_requests.py::RequestsTestCase::test_DIGEST_STREAM\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_ALTERNATIVE\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_WITH_MIXED_PARAMS\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_WITH_PARAMS\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_HEAD\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_PUT\", \"test_requests.py::RequestsTestCase::test_auth_is_retained_for_redirect_on_host\", \"test_requests.py::RequestsTestCase::test_auth_is_stripped_on_redirect_off_host\", \"test_requests.py::RequestsTestCase::test_autoset_header_values_are_native\", \"test_requests.py::RequestsTestCase::test_basic_auth_str_is_always_native\", \"test_requests.py::RequestsTestCase::test_basic_building\", \"test_requests.py::RequestsTestCase::test_can_send_nonstring_objects_with_files\", \"test_requests.py::RequestsTestCase::test_cannot_send_unprepared_requests\", \"test_requests.py::RequestsTestCase::test_connection_error\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_items\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keeps_items\", 
\"test_requests.py::RequestsTestCase::test_cookie_as_dict_keeps_len\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keys\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_values\", \"test_requests.py::RequestsTestCase::test_cookie_parameters\", \"test_requests.py::RequestsTestCase::test_cookie_persists_via_api\", \"test_requests.py::RequestsTestCase::test_cookie_quote_wrapped\", \"test_requests.py::RequestsTestCase::test_cookie_removed_on_expire\", \"test_requests.py::RequestsTestCase::test_cookie_sent_on_redirect\", \"test_requests.py::RequestsTestCase::test_custom_content_type\", \"test_requests.py::RequestsTestCase::test_decompress_gzip\", \"test_requests.py::RequestsTestCase::test_different_encodings_dont_break_post\", \"test_requests.py::RequestsTestCase::test_entry_points\", \"test_requests.py::RequestsTestCase::test_fixes_1329\", \"test_requests.py::RequestsTestCase::test_generic_cookiejar_works\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_encoded_hashes\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_encoded_spaces\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_not_encoded_spaces\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_percent_chars\", \"test_requests.py::RequestsTestCase::test_header_keys_are_native\", \"test_requests.py::RequestsTestCase::test_header_remove_is_case_insensitive\", \"test_requests.py::RequestsTestCase::test_headers_on_session_with_None_are_not_sent\", \"test_requests.py::RequestsTestCase::test_history_is_always_a_list\", \"test_requests.py::RequestsTestCase::test_hook_receives_request_arguments\", \"test_requests.py::RequestsTestCase::test_http_error\", \"test_requests.py::RequestsTestCase::test_invalid_url\", \"test_requests.py::RequestsTestCase::test_links\", \"test_requests.py::RequestsTestCase::test_long_authinfo_in_url\", \"test_requests.py::RequestsTestCase::test_mixed_case_scheme_acceptable\", \"test_requests.py::RequestsTestCase::test_no_content_length\", \"test_requests.py::RequestsTestCase::test_nonhttp_schemes_dont_check_URLs\", \"test_requests.py::RequestsTestCase::test_param_cookiejar_works\", \"test_requests.py::RequestsTestCase::test_params_are_added_before_fragment\", \"test_requests.py::RequestsTestCase::test_params_are_merged_case_sensitive\", \"test_requests.py::RequestsTestCase::test_path_is_not_double_encoded\", \"test_requests.py::RequestsTestCase::test_prepare_request_with_bytestring_url\", \"test_requests.py::RequestsTestCase::test_prepared_from_session\", \"test_requests.py::RequestsTestCase::test_prepared_request_hook\", \"test_requests.py::RequestsTestCase::test_pyopenssl_redirect\", \"test_requests.py::RequestsTestCase::test_redirect_with_wrong_gzipped_header\", \"test_requests.py::RequestsTestCase::test_request_and_response_are_pickleable\", \"test_requests.py::RequestsTestCase::test_request_cookie_overrides_session_cookie\", \"test_requests.py::RequestsTestCase::test_request_cookies_not_persisted\", \"test_requests.py::RequestsTestCase::test_request_ok_set\", \"test_requests.py::RequestsTestCase::test_requests_in_history_are_not_overridden\", \"test_requests.py::RequestsTestCase::test_response_decode_unicode\", \"test_requests.py::RequestsTestCase::test_response_is_iterable\", \"test_requests.py::RequestsTestCase::test_session_hooks_are_overriden_by_request_hooks\", \"test_requests.py::RequestsTestCase::test_session_hooks_are_used_with_no_request_hooks\", 
\"test_requests.py::RequestsTestCase::test_session_pickling\", \"test_requests.py::RequestsTestCase::test_set_cookie_on_301\", \"test_requests.py::RequestsTestCase::test_status_raising\", \"test_requests.py::RequestsTestCase::test_time_elapsed_blank\", \"test_requests.py::RequestsTestCase::test_transport_adapter_ordering\", \"test_requests.py::RequestsTestCase::test_unicode_get\", \"test_requests.py::RequestsTestCase::test_unicode_header_name\", \"test_requests.py::RequestsTestCase::test_unicode_method_name\", \"test_requests.py::RequestsTestCase::test_unicode_multipart_post_fieldnames\", \"test_requests.py::RequestsTestCase::test_uppercase_scheme_redirect\", \"test_requests.py::RequestsTestCase::test_urlencoded_get_query_multivalued_param\", \"test_requests.py::RequestsTestCase::test_user_agent_transfers\", \"test_requests.py::TestContentEncodingDetection::test_html4_pragma\", \"test_requests.py::TestContentEncodingDetection::test_html_charset\", \"test_requests.py::TestContentEncodingDetection::test_none\", \"test_requests.py::TestContentEncodingDetection::test_precedence\", \"test_requests.py::TestContentEncodingDetection::test_xhtml_pragma\", \"test_requests.py::TestContentEncodingDetection::test_xml\", \"test_requests.py::TestCaseInsensitiveDict::test_contains\", \"test_requests.py::TestCaseInsensitiveDict::test_delitem\", \"test_requests.py::TestCaseInsensitiveDict::test_docstring_example\", \"test_requests.py::TestCaseInsensitiveDict::test_equality\", \"test_requests.py::TestCaseInsensitiveDict::test_fixes_649\", \"test_requests.py::TestCaseInsensitiveDict::test_get\", \"test_requests.py::TestCaseInsensitiveDict::test_getitem\", \"test_requests.py::TestCaseInsensitiveDict::test_iter\", \"test_requests.py::TestCaseInsensitiveDict::test_iterable_init\", \"test_requests.py::TestCaseInsensitiveDict::test_kwargs_init\", \"test_requests.py::TestCaseInsensitiveDict::test_len\", \"test_requests.py::TestCaseInsensitiveDict::test_lower_items\", \"test_requests.py::TestCaseInsensitiveDict::test_mapping_init\", \"test_requests.py::TestCaseInsensitiveDict::test_preserve_key_case\", \"test_requests.py::TestCaseInsensitiveDict::test_preserve_last_key_case\", \"test_requests.py::TestCaseInsensitiveDict::test_setdefault\", \"test_requests.py::TestCaseInsensitiveDict::test_update\", \"test_requests.py::TestCaseInsensitiveDict::test_update_retains_unchanged\", \"test_requests.py::UtilsTestCase::test_address_in_network\", \"test_requests.py::UtilsTestCase::test_dotted_netmask\", \"test_requests.py::UtilsTestCase::test_get_auth_from_url\", \"test_requests.py::UtilsTestCase::test_get_environ_proxies\", \"test_requests.py::UtilsTestCase::test_get_environ_proxies_ip_ranges\", \"test_requests.py::UtilsTestCase::test_is_ipv4_address\", \"test_requests.py::UtilsTestCase::test_is_valid_cidr\", \"test_requests.py::UtilsTestCase::test_super_len_io_streams\", \"test_requests.py::TestMorselToCookieExpires::test_expires_invalid_int\", \"test_requests.py::TestMorselToCookieExpires::test_expires_invalid_str\", \"test_requests.py::TestMorselToCookieExpires::test_expires_none\", \"test_requests.py::TestMorselToCookieExpires::test_expires_valid_str\", \"test_requests.py::TestMorselToCookieMaxAge::test_max_age_invalid_str\", \"test_requests.py::TestMorselToCookieMaxAge::test_max_age_valid_int\", \"test_requests.py::TestTimeout::test_stream_timeout\", \"test_requests.py::TestTimeout::test_invalid_timeout\", \"test_requests.py::TestTimeout::test_none_timeout\", \"test_requests.py::TestTimeout::test_read_timeout\", 
\"test_requests.py::TestTimeout::test_connect_timeout\", \"test_requests.py::TestTimeout::test_total_timeout_connect\", \"test_requests.py::TestRedirects::test_requests_are_updated_each_time\", \"test_requests.py::test_data_argument_accepts_tuples\", \"test_requests.py::test_prepared_request_empty_copy\", \"test_requests.py::test_prepared_request_no_cookies_copy\", \"test_requests.py::test_prepared_request_complete_copy\", \"test_requests.py::test_prepare_unicode_url\"]", "expected_spans": { - "sympy/matrices/matrices.py": [ - "MatrixProperties._eval_is_upper_hessenberg", - "MatrixProperties.is_upper" + "requests/sessions.py": [ + "imports", + "Session.request" ] }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-12481", - "repo": "sympy/sympy", - "base_commit": "c807dfe7569692cad24f02a08477b70c1679a4dd", - "problem_statement": "`Permutation` constructor fails with non-disjoint cycles\nCalling `Permutation([[0,1],[0,1]])` raises a `ValueError` instead of constructing the identity permutation. If the cycles passed in are non-disjoint, they should be applied in left-to-right order and the resulting permutation should be returned.\r\n\r\nThis should be easy to compute. I don't see a reason why non-disjoint cycles should be forbidden.\n", - "golden_patch": "diff --git a/sympy/combinatorics/permutations.py b/sympy/combinatorics/permutations.py\n--- a/sympy/combinatorics/permutations.py\n+++ b/sympy/combinatorics/permutations.py\n@@ -895,12 +895,8 @@ def __new__(cls, *args, **kwargs):\n # counting starts from 1.\n \n temp = flatten(args)\n- if has_dups(temp):\n- if is_cycle:\n- raise ValueError('there were repeated elements; to resolve '\n- 'cycles use Cycle%s.' % ''.join([str(tuple(c)) for c in args]))\n- else:\n- raise ValueError('there were repeated elements.')\n+ if has_dups(temp) and not is_cycle:\n+ raise ValueError('there were repeated elements.')\n temp = set(temp)\n \n if not is_cycle and \\\n", - "expected_spans": { - "sympy/combinatorics/permutations.py": [ - "Permutation.__new__" + "test_file_spans": { + "test_requests.py": [ + "TestTimeout" ] }, "resolved_by": [ { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sympy/combinatorics/permutations.py\n+++ b/sympy/combinatorics/permutations.py\n@@ -894,32 +894,36 @@\n # for convenience in case one wants to enter permutations where\n # counting starts from 1.\n \n temp = flatten(args)\n if has_dups(temp):\n if is_cycle:\n- raise ValueError('there were repeated elements; to resolve '\n- 'cycles use Cycle%s.' 
% ''.join([str(tuple(c)) for c in args]))\n+ # Allow non-disjoint cycles, apply them left-to-right\n+ c = Cycle()\n+ for ci in args:\n+ c = c(*ci)\n+ aform = c.list()\n else:\n raise ValueError('there were repeated elements.')\n- temp = set(temp)\n-\n- if not is_cycle and \\\n- any(i not in temp for i in range(len(temp))):\n- raise ValueError(\"Integers 0 through %s must be present.\" %\n- max(temp))\n-\n- if is_cycle:\n- # it's not necessarily canonical so we won't store\n- # it -- use the array form instead\n- c = Cycle()\n- for ci in args:\n- c = c(*ci)\n- aform = c.list()\n else:\n- aform = list(args)\n+ temp = set(temp)\n+\n+ if not is_cycle and \\\n+ any(i not in temp for i in range(len(temp))):\n+ raise ValueError(\"Integers 0 through %s must be present.\" %\n+ max(temp))\n+\n+ if is_cycle:\n+ # it's not necessarily canonical so we won't store\n+ # it -- use the array form instead\n+ c = Cycle()\n+ for ci in args:\n+ c = c(*ci)\n+ aform = c.list()\n+ else:\n+ aform = list(args)\n if size and size > len(aform):\n # don't allow for truncation of permutation which\n # might split a cycle and lead to an invalid aform\n # but do allow the permutation size to be increased\n aform.extend(list(range(len(aform), size)))\n size = len(aform)\n" + "name": "20240523_aider", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sympy/combinatorics/permutations.py b/sympy/combinatorics/permutations.py\nindex 136ed5a478..0c8b3ee111 100644\n--- a/sympy/combinatorics/permutations.py\n+++ b/sympy/combinatorics/permutations.py\n@@ -897,11 +897,12 @@ def __new__(cls, *args, **kwargs):\n temp = flatten(args)\n if has_dups(temp):\n if is_cycle:\n- raise ValueError('there were repeated elements; to resolve '\n- 'cycles use Cycle%s.' 
% ''.join([str(tuple(c)) for c in args]))\n+ # Allow non-disjoint cycles by not raising an error here\n+ pass\n else:\n raise ValueError('there were repeated elements.')\n temp = set(temp)\n+ temp = set(temp)\n \n if not is_cycle and \\\n any(i not in temp for i in range(len(temp))):\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13031", - "repo": "sympy/sympy", - "base_commit": "2dfa7457f20ee187fbb09b5b6a1631da4458388c", - "problem_statement": "Behavior of Matrix hstack and vstack changed in sympy 1.1\nIn sympy 1.0:\r\n```\r\nimport sympy as sy\r\nM1 = sy.Matrix.zeros(0, 0)\r\nM2 = sy.Matrix.zeros(0, 1)\r\nM3 = sy.Matrix.zeros(0, 2)\r\nM4 = sy.Matrix.zeros(0, 3)\r\nsy.Matrix.hstack(M1, M2, M3, M4).shape\r\n```\r\nreturns \r\n`(0, 6)`\r\n\r\nNow, same in sympy 1.1:\r\n```\r\nimport sympy as sy\r\nM1 = sy.Matrix.zeros(0, 0)\r\nM2 = sy.Matrix.zeros(0, 1)\r\nM3 = sy.Matrix.zeros(0, 2)\r\nM4 = sy.Matrix.zeros(0, 3)\r\nsy.Matrix.hstack(M1, M2, M3, M4).shape\r\n```\r\nreturns\r\n`(0, 3)\r\n`\r\nwhereas:\r\n```\r\nimport sympy as sy\r\nM1 = sy.Matrix.zeros(1, 0)\r\nM2 = sy.Matrix.zeros(1, 1)\r\nM3 = sy.Matrix.zeros(1, 2)\r\nM4 = sy.Matrix.zeros(1, 3)\r\nsy.Matrix.hstack(M1, M2, M3, M4).shape\r\n```\r\nreturns\r\n`(1, 6)\r\n`\n", - "golden_patch": "diff --git a/sympy/matrices/sparse.py b/sympy/matrices/sparse.py\n--- a/sympy/matrices/sparse.py\n+++ b/sympy/matrices/sparse.py\n@@ -985,8 +985,10 @@ def col_join(self, other):\n >>> C == A.row_insert(A.rows, Matrix(B))\n True\n \"\"\"\n- if not self:\n- return type(self)(other)\n+ # A null matrix can always be stacked (see #10770)\n+ if self.rows == 0 and self.cols != other.cols:\n+ return self._new(0, other.cols, []).col_join(other)\n+\n A, B = self, other\n if not A.cols == B.cols:\n raise ShapeError()\n@@ -1191,8 +1193,10 @@ def row_join(self, other):\n >>> C == A.col_insert(A.cols, B)\n True\n \"\"\"\n- if not self:\n- return type(self)(other)\n+ # A null matrix can always be stacked (see #10770)\n+ if self.cols == 0 and self.rows != other.rows:\n+ return self._new(other.rows, 0, []).row_join(other)\n+\n A, B = self, other\n if not A.rows == B.rows:\n raise ShapeError()\n", - "expected_spans": { - "sympy/matrices/sparse.py": [ - "MutableSparseMatrix.col_join", - "MutableSparseMatrix.row_join" - ] - }, - "resolved_by": [ - { - "name": "20240523_aider", - "patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\nindex 7ef51bc847..5443d8ddc7 100644\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -378,6 +378,9 @@ def hstack(cls, *args):\n return cls._new()\n \n kls = type(args[0])\n+ if all(arg.rows == 0 for arg in args):\n+ total_cols = sum(arg.cols for arg in args)\n+ return cls._new(0, total_cols, [])\n return reduce(kls.row_join, args)\n \n def reshape(self, rows, cols):\n@@ -601,6 +604,9 @@ def vstack(cls, *args):\n return cls._new()\n \n kls = type(args[0])\n+ if all(arg.cols == 0 for arg in args):\n+ total_rows = sum(arg.rows for arg in args)\n+ return cls._new(total_rows, 0, [])\n return reduce(kls.col_join, args)\n \n \n" + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\nindex 7ef51bc847..6bdaf0d0cf 100644\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -211,7 +211,7 @@ def 
col_insert(self, pos, other):\n elif pos > self.cols:\n pos = self.cols\n \n- if self.rows != other.rows:\n+ if self.rows != other.rows and self.rows != 0 and other.rows != 0:\n raise ShapeError(\n \"self and other must have the same number of rows.\")\n \n@@ -378,7 +378,14 @@ def hstack(cls, *args):\n return cls._new()\n \n kls = type(args[0])\n- return reduce(kls.row_join, args)\n+ non_empty_matrices = [arg for arg in args if arg.rows > 0]\n+ if not non_empty_matrices:\n+ return cls._new(0, sum(arg.cols for arg in args), [])\n+ \n+ joined_matrix = non_empty_matrices[0]\n+ for m in non_empty_matrices[1:]:\n+ joined_matrix = joined_matrix.row_join(m)\n+ return joined_matrix\n \n def reshape(self, rows, cols):\n \"\"\"Reshape the matrix. Total number of elements must remain the same.\n@@ -601,7 +608,14 @@ def vstack(cls, *args):\n return cls._new()\n \n kls = type(args[0])\n- return reduce(kls.col_join, args)\n+ non_empty_matrices = [arg for arg in args if arg.cols > 0]\n+ if not non_empty_matrices:\n+ return cls._new(sum(arg.rows for arg in args), 0, [])\n+ \n+ joined_matrix = non_empty_matrices[0]\n+ for m in non_empty_matrices[1:]:\n+ joined_matrix = joined_matrix.col_join(m)\n+ return joined_matrix\n \n \n class MatrixSpecial(MatrixRequired):\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13043", - "repo": "sympy/sympy", - "base_commit": "a3389a25ec84d36f5cf04a4f2562d820f131db64", - "problem_statement": "decompose() function in intpoly returns a list of arbitrary order\nThe decompose() function, with separate=True, returns `list(poly_dict.values())`, which is ordered arbitrarily. \r\n\r\nWhat is this used for? It should be sorted somehow, or returning a set (in which case, why not just use the returned dictionary and have the caller take the values). This is causing test failures for me after some changes to the core. 
\r\n\r\nCC @ArifAhmed1995 @certik \n", - "golden_patch": "diff --git a/sympy/integrals/intpoly.py b/sympy/integrals/intpoly.py\n--- a/sympy/integrals/intpoly.py\n+++ b/sympy/integrals/intpoly.py\n@@ -556,7 +556,7 @@ def decompose(expr, separate=False):\n >>> decompose(x**2 + x*y + x + y + x**3*y**2 + y**5)\n {1: x + y, 2: x**2 + x*y, 5: x**3*y**2 + y**5}\n >>> decompose(x**2 + x*y + x + y + x**3*y**2 + y**5, True)\n- [x, y, x**2, y**5, x*y, x**3*y**2]\n+ {x, x**2, y, y**5, x*y, x**3*y**2}\n \"\"\"\n expr = S(expr)\n poly_dict = {}\n@@ -569,7 +569,7 @@ def decompose(expr, separate=False):\n degrees = [(sum(degree_list(monom, *symbols)), monom)\n for monom in expr.args]\n if separate:\n- return [monom[1] for monom in degrees]\n+ return {monom[1] for monom in degrees}\n else:\n for monom in degrees:\n degree, term = monom\n@@ -593,7 +593,7 @@ def decompose(expr, separate=False):\n poly_dict[0] = expr\n \n if separate:\n- return list(poly_dict.values())\n+ return set(poly_dict.values())\n return poly_dict\n \n \n", - "expected_spans": { - "sympy/integrals/intpoly.py": [ - "decompose" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13146", - "repo": "sympy/sympy", - "base_commit": "b678d8103e48fdb1af335dbf0080b3d5366f2d17", - "problem_statement": "Exponent doesn't fully simplify\nSay I have code like this:\n\n```\nimport sympy\nfrom sympy import *\nx=Symbol('x')\nexpr1 = S(1)/2*x**2.5\nexpr2 = S(1)*x**(S(5)/2)/2\nres = expr1-expr2\nres= simplify(res.evalf(5))\nprint res\n```\n\nThe output is\n`-0.5*x**2.5 + 0.5*x**2.5`\nHow do I simplify it to 0?\n\n", - "golden_patch": "diff --git a/sympy/core/operations.py b/sympy/core/operations.py\n--- a/sympy/core/operations.py\n+++ b/sympy/core/operations.py\n@@ -332,9 +332,7 @@ def _eval_evalf(self, prec):\n args.append(a)\n else:\n args.append(newa)\n- if not _aresame(tuple(args), tail_args):\n- tail = self.func(*args)\n- return self.func(x, tail)\n+ return self.func(x, *args)\n \n # this is the same as above, but there were no pure-number args to\n # deal with\n@@ -345,9 +343,7 @@ def _eval_evalf(self, prec):\n args.append(a)\n else:\n args.append(newa)\n- if not _aresame(tuple(args), self.args):\n- return self.func(*args)\n- return self\n+ return self.func(*args)\n \n @classmethod\n def make_args(cls, expr):\n", - "expected_spans": { - "sympy/core/operations.py": [ - "AssocOp._eval_evalf" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13177", - "repo": "sympy/sympy", - "base_commit": "662cfb818e865f580e18b59efbb3540c34232beb", - "problem_statement": "Mod(x**2, x) is not (always) 0\nWhen the base is not an integer, `x**2 % x` is not 0. 
The base is not tested to be an integer in Mod's eval logic:\r\n\r\n```\r\nif (p == q or p == -q or\r\n p.is_Pow and p.exp.is_Integer and p.base == q or\r\n p.is_integer and q == 1):\r\n return S.Zero\r\n```\r\n\r\nso\r\n\r\n```\r\n>>> Mod(x**2, x)\r\n0\r\n```\r\nbut\r\n```\r\n>>> x = S(1.5)\r\n>>> Mod(x**2, x)\r\n0.75\r\n```\n", - "golden_patch": "diff --git a/sympy/core/mod.py b/sympy/core/mod.py\n--- a/sympy/core/mod.py\n+++ b/sympy/core/mod.py\n@@ -39,7 +39,8 @@ def doit(p, q):\n if p.is_infinite or q.is_infinite or p is nan or q is nan:\n return nan\n if (p == q or p == -q or\n- p.is_Pow and p.exp.is_Integer and p.base == q or\n+ p.is_Pow and p.exp.is_integer and p.base == q and q.is_integer\n+ and p.exp.is_positive or\n p.is_integer and q == 1):\n return S.Zero\n \n", - "expected_spans": { - "sympy/core/mod.py": [] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13437", - "repo": "sympy/sympy", - "base_commit": "674afc619d7f5c519b6a5393a8b0532a131e57e0", - "problem_statement": "bell(n).limit(n, oo) should be oo rather than bell(oo)\n`bell(n).limit(n,oo)` should take the value infinity, but the current output is `bell(oo)`. As the Bell numbers represent the number of partitions of a set, it seems natural that `bell(oo)` should be able to be evaluated rather than be returned unevaluated. This issue is also in line with the recent fixes to the corresponding limit for the Fibonacci numbers and Lucas numbers.\n\n```\nfrom sympy import *\nn = symbols('n')\nbell(n).limit(n,oo)\n\nOutput:\nbell(oo)\n```\n\nI'm new to Sympy, so I'd appreciate the opportunity to fix this bug myself if that's alright.\n\n", - "golden_patch": "diff --git a/sympy/functions/combinatorial/numbers.py b/sympy/functions/combinatorial/numbers.py\n--- a/sympy/functions/combinatorial/numbers.py\n+++ b/sympy/functions/combinatorial/numbers.py\n@@ -424,6 +424,15 @@ def _bell_incomplete_poly(n, k, symbols):\n \n @classmethod\n def eval(cls, n, k_sym=None, symbols=None):\n+ if n is S.Infinity:\n+ if k_sym is None:\n+ return S.Infinity\n+ else:\n+ raise ValueError(\"Bell polynomial is not defined\")\n+\n+ if n.is_negative or n.is_integer is False:\n+ raise ValueError(\"a non-negative integer expected\")\n+\n if n.is_Integer and n.is_nonnegative:\n if k_sym is None:\n return Integer(cls._bell(int(n)))\n", - "expected_spans": { - "sympy/functions/combinatorial/numbers.py": [ - "bell._bell_incomplete_poly" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13471", - "repo": "sympy/sympy", - "base_commit": "3546ac7ed78e1780c1a76929864bb33330055740", - "problem_statement": "Python 2->3 pickle fails with float-containing expressions\nDumping a pickled sympy expression containing a float in Python 2, then loading it in Python 3 generates an error.\r\n\r\nHere is a minimum working example, verified with sympy git commit 3546ac7 (master at time of writing), Python 2.7 and Python 3.6:\r\n\r\n```python\r\npython2 -c 'import pickle; import sympy; x = sympy.symbols(\"x\"); print pickle.dumps(x + 1.0, 2)' | python3 -c 'import pickle; import sys; print(pickle.loads(sys.stdin.buffer.read()))'\r\n```\r\n\r\nand the result:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/Users/alex/git/VU/sympy/sympy/core/numbers.py\", line 1045, in __new__\r\n num[1] = long(num[1], 16)\r\nValueError: invalid literal for int() with base 16: '1L'\r\n```\n", - "golden_patch": "diff --git a/sympy/core/numbers.py 
b/sympy/core/numbers.py\n--- a/sympy/core/numbers.py\n+++ b/sympy/core/numbers.py\n@@ -1042,6 +1042,11 @@ def __new__(cls, num, dps=None, prec=None, precision=None):\n # it's a hexadecimal (coming from a pickled object)\n # assume that it is in standard form\n num = list(num)\n+ # If we're loading an object pickled in Python 2 into\n+ # Python 3, we may need to strip a tailing 'L' because\n+ # of a shim for int on Python 3, see issue #13470.\n+ if num[1].endswith('L'):\n+ num[1] = num[1][:-1]\n num[1] = long(num[1], 16)\n _mpf_ = tuple(num)\n else:\n", - "expected_spans": { - "sympy/core/numbers.py": [ - "Float.__new__" - ] - }, - "resolved_by": [ - { - "name": "20240523_aider", - "patch": "diff --git a/sympy/core/numbers.py b/sympy/core/numbers.py\nindex 7bc96e68eb..2cbdc1534d 100644\n--- a/sympy/core/numbers.py\n+++ b/sympy/core/numbers.py\n@@ -1042,7 +1042,11 @@ def __new__(cls, num, dps=None, prec=None, precision=None):\n # it's a hexadecimal (coming from a pickled object)\n # assume that it is in standard form\n num = list(num)\n- num[1] = long(num[1], 16)\n+ if num[1].endswith('L'):\n+ # remove the trailing 'L' for compatibility\n+ # between Python 2 and 3\n+ num[1] = num[1][:-1]\n+ num[1] = int(num[1], 16)\n _mpf_ = tuple(num)\n else:\n if len(num) == 4:\n" + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sympy/core/numbers.py\n+++ b/sympy/core/numbers.py\n@@ -1042,6 +1042,8 @@\n # it's a hexadecimal (coming from a pickled object)\n # assume that it is in standard form\n num = list(num)\n+ if num[1].endswith('L'):\n+ num[1] = num[1][:-1]\n num[1] = long(num[1], 16)\n _mpf_ = tuple(num)\n else:\n" + "name": "20240828_autose_mixed", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sympy/core/numbers.py b/sympy/core/numbers.py\nindex 7bc96e68eb..260de6617e 100644\n--- a/sympy/core/numbers.py\n+++ b/sympy/core/numbers.py\n@@ -1042,7 +1042,9 @@ def __new__(cls, num, dps=None, prec=None, precision=None):\n # it's a hexadecimal (coming from a pickled object)\n # assume that it is in standard form\n num = list(num)\n- num[1] = long(num[1], 16)\n+ if num[1].endswith('L'):\n+ num[1] = num[1][:-1]\n+ num[1] = int(num[1], 16)\n _mpf_ = tuple(num)\n else:\n if len(num) == 4:\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13480", - "repo": "sympy/sympy", - "base_commit": "f57fe3f4b3f2cab225749e1b3b38ae1bf80b62f0", - "problem_statement": ".subs on coth(log(tan(x))) errors for certain integral values\n >>> from sympy import *\r\n >>> x = Symbol('x')\r\n >>> e = coth(log(tan(x)))\r\n >>> print(e.subs(x, 2))\r\n ...\r\n File \"C:\\Users\\E\\Desktop\\sympy-master\\sympy\\functions\\elementary\\hyperbolic.py\", line 590, in eval\r\n if cotm is S.ComplexInfinity:\r\n NameError: name 'cotm' is not defined\r\n\r\nFails for 2, 3, 5, 6, 8, 9, 11, 12, 13, 15, 18, ... 
etc.\n", - "golden_patch": "diff --git a/sympy/functions/elementary/hyperbolic.py b/sympy/functions/elementary/hyperbolic.py\n--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,7 +587,7 @@ def eval(cls, arg):\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n", - "expected_spans": { - "sympy/functions/elementary/hyperbolic.py": [ - "coth.eval" - ] - }, - "resolved_by": [ - { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/sympy/functions/elementary/hyperbolic.py b/sympy/functions/elementary/hyperbolic.py\nindex 2a4555d919..5fbb73c786 100644\n--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,7 +587,7 @@ def eval(cls, arg):\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n" + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/sympy/functions/elementary/hyperbolic.py b/sympy/functions/elementary/hyperbolic.py\nindex 2a4555d919..5fbb73c786 100644\n--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,7 +587,7 @@ def eval(cls, arg):\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n" + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -584,13 +584,13 @@\n return -cls(-arg)\n \n if arg.is_Add:\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n \n if arg.func == asinh:\n x = arg.args[0]\n" + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} }, { - "name": "20240523_aider", - "patch": "diff --git a/sympy/functions/elementary/hyperbolic.py b/sympy/functions/elementary/hyperbolic.py\nindex 2a4555d919..5fbb73c786 100644\n--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,7 +587,7 @@ def eval(cls, arg):\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n" + "name": "20240706_sima_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,7 +587,7 @@\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n" + 
"name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sympy/functions/elementary/hyperbolic.py b/sympy/functions/elementary/hyperbolic.py\nindex 2a4555d919..5fbb73c786 100644\n--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,7 +587,7 @@ def eval(cls, arg):\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n" + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "requests/sessions.py": [ + "docstring", + "imports", + "merge_setting", + "merge_hooks", + "SessionRedirectMixin.resolve_redirects", + "SessionRedirectMixin.rebuild_auth", + "SessionRedirectMixin.rebuild_proxies", + "Session", + "Session.__init__", + "Session.prepare_request", + "Session.request", + "Session.get", + "Session.options", + "Session.head", + "Session.post", + "Session.put", + "Session.patch", + "Session.delete", + "Session.send", + "Session.merge_environment_settings", + "Session.get_adapter", + "Session.mount", + "Session.__getstate__", + "session" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "docstring", + "imports", + "merge_setting", + "merge_hooks", + "SessionRedirectMixin.resolve_redirects", + "SessionRedirectMixin.rebuild_auth", + "SessionRedirectMixin.rebuild_proxies", + "Session", + "Session.__init__", + "Session.prepare_request", + "Session.request", + "Session.get", + "Session.options", + "Session.head", + "Session.post", + "Session.put", + "Session.patch", + "Session.delete", + "Session.send", + "Session.merge_environment_settings", + "Session.get_adapter", + "Session.mount", + "Session.__getstate__", + "session" + ] + } }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/sympy/functions/elementary/hyperbolic.py b/sympy/functions/elementary/hyperbolic.py\nindex 2a4555d919..5fbb73c786 100644\n--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,7 +587,7 @@ def eval(cls, arg):\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n" + "name": "20240820_honeycomb", + "updated_spans": { + "requests/compat.py": [ + "impl:49" + ], + "requests/sessions.py": [ + "docstring", + "imports", + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "docstring", + "imports", + "Session.request" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sympy/functions/elementary/hyperbolic.py b/sympy/functions/elementary/hyperbolic.py\nindex 2a4555d919..d2a88913de 100644\n--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,10 +587,11 @@ def eval(cls, arg):\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n- else: # cothm == 0\n- return tanh(x)\n+ elif cothm is S.Zero: # Corrected check for `cothm` is zero\n+ return tanh(x) # Assuming that tanh(x) is the intended return value when `cothm` is zero\n+ # Additional else condition might be required depending on the context\n \n if arg.func == asinh:\n x = arg.args[0]\n" + "name": 
"20240808_RepoGraph_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sympy/functions/elementary/hyperbolic.py b/sympy/functions/elementary/hyperbolic.py\nindex 2a4555d919..5fbb73c786 100644\n--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,7 +587,7 @@ def eval(cls, arg):\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13647", - "repo": "sympy/sympy", - "base_commit": "67e3c956083d0128a621f65ee86a7dacd4f9f19f", - "problem_statement": "Matrix.col_insert() no longer seems to work correctly.\nExample:\r\n\r\n```\r\nIn [28]: import sympy as sm\r\n\r\nIn [29]: M = sm.eye(6)\r\n\r\nIn [30]: M\r\nOut[30]: \r\n\u23a11 0 0 0 0 0\u23a4\r\n\u23a2 \u23a5\r\n\u23a20 1 0 0 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 1 0 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 0 1 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 0 0 1 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a30 0 0 0 0 1\u23a6\r\n\r\nIn [31]: V = 2 * sm.ones(6, 2)\r\n\r\nIn [32]: V\r\nOut[32]: \r\n\u23a12 2\u23a4\r\n\u23a2 \u23a5\r\n\u23a22 2\u23a5\r\n\u23a2 \u23a5\r\n\u23a22 2\u23a5\r\n\u23a2 \u23a5\r\n\u23a22 2\u23a5\r\n\u23a2 \u23a5\r\n\u23a22 2\u23a5\r\n\u23a2 \u23a5\r\n\u23a32 2\u23a6\r\n\r\nIn [33]: M.col_insert(3, V)\r\nOut[33]: \r\n\u23a11 0 0 2 2 1 0 0\u23a4\r\n\u23a2 \u23a5\r\n\u23a20 1 0 2 2 0 1 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 1 2 2 0 0 1\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 0 2 2 0 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 0 2 2 0 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a30 0 0 2 2 0 0 0\u23a6\r\nIn [34]: sm.__version__\r\nOut[34]: '1.1.1'\r\n```\r\n\r\nThe 3 x 3 identify matrix to the right of the columns of twos is shifted from the bottom three rows to the top three rows.\r\n\r\n@siefkenj Do you think this has to do with your matrix refactor?\n", - "golden_patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -86,7 +86,7 @@ def entry(i, j):\n return self[i, j]\n elif pos <= j < pos + other.cols:\n return other[i, j - pos]\n- return self[i, j - pos - other.cols]\n+ return self[i, j - other.cols]\n \n return self._new(self.rows, self.cols + other.cols,\n lambda i, j: entry(i, j))\n", - "expected_spans": { - "sympy/matrices/common.py": [ - "MatrixShaping._eval_col_insert" - ] - }, - "resolved_by": [ - { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\nindex 056d9bc497..4aa75e4d77 100644\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -86,7 +86,7 @@ def entry(i, j):\n return self[i, j]\n elif pos <= j < pos + other.cols:\n return other[i, j - pos]\n- return self[i, j - pos - other.cols]\n+ return self[i, j - other.cols]\n \n return self._new(self.rows, self.cols + other.cols,\n lambda i, j: entry(i, j))\n" + "name": "20240908_infant_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "Session.prepare_request", + "Session.request", + "Session.send" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.prepare_request", + "Session.request", + "Session.send" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git 
a/sympy/assumptions/sathandlers.py b/sympy/assumptions/sathandlers.py\nindex 61d62d04cf..881a5f9ba8 100644\n--- a/sympy/assumptions/sathandlers.py\n+++ b/sympy/assumptions/sathandlers.py\n@@ -1,6 +1,7 @@\n from __future__ import print_function, division\n \n-from collections import MutableMapping, defaultdict\n+from collections.abc import MutableMapping\n+from collections import defaultdict\n \n from sympy.core import (Add, Mul, Pow, Integer, Number, NumberSymbol,)\n from sympy.core.numbers import ImaginaryUnit\ndiff --git a/sympy/core/basic.py b/sympy/core/basic.py\nindex d4ca323e0d..b42fd1d75c 100644\n--- a/sympy/core/basic.py\n+++ b/sympy/core/basic.py\n@@ -1,6 +1,7 @@\n \"\"\"Base class for all the objects in SymPy\"\"\"\n from __future__ import print_function, division\n-from collections import Mapping, defaultdict\n+from collections.abc import Mapping\n+from collections import defaultdict\n from itertools import chain\n \n from .assumptions import BasicMeta, ManagedProperties\ndiff --git a/sympy/core/containers.py b/sympy/core/containers.py\nindex 15abd93e3a..8ac11e7ecd 100644\n--- a/sympy/core/containers.py\n+++ b/sympy/core/containers.py\n@@ -268,7 +268,7 @@ def _sorted_args(self):\n return tuple(sorted(self.args, key=default_sort_key))\n \n \n-class OrderedSet(collections.MutableSet):\n+class OrderedSet(collections.abc.MutableSet):\n def __init__(self, iterable=None):\n if iterable:\n self.map = collections.OrderedDict((item, None) for item in iterable)\ndiff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\nindex 056d9bc497..4aa75e4d77 100644\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -86,7 +86,7 @@ def entry(i, j):\n return self[i, j]\n elif pos <= j < pos + other.cols:\n return other[i, j - pos]\n- return self[i, j - pos - other.cols]\n+ return self[i, j - other.cols]\n \n return self._new(self.rows, self.cols + other.cols,\n lambda i, j: entry(i, j))\ndiff --git a/sympy/matrices/dense.py b/sympy/matrices/dense.py\nindex 00395585b9..8726153816 100644\n--- a/sympy/matrices/dense.py\n+++ b/sympy/matrices/dense.py\n@@ -1,5 +1,6 @@\n from __future__ import print_function, division\n \n+import collections\n import random\n from sympy import Derivative\n \n@@ -421,6 +422,11 @@ def _new(cls, *args, **kwargs):\n rows, cols, flat_list = args\n else:\n rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)\n+ if len(args) == 3 and isinstance(args[2], collections.abc.Callable):\n+ # if we passed in a function, use that to populate the indices\n+ mat = list(args[2](i, j) for i in range(rows) for j in range(cols))\n+ flat_list = mat\n+ flat_list = list(flat_list) # create a shallow copy\n flat_list = list(flat_list) # create a shallow copy\n self = object.__new__(cls)\n self.rows = rows\ndiff --git a/sympy/matrices/matrices.py b/sympy/matrices/matrices.py\nindex 5e241c68e9..7477971c04 100644\n--- a/sympy/matrices/matrices.py\n+++ b/sympy/matrices/matrices.py\n@@ -2045,7 +2045,7 @@ def _handle_creation_inputs(cls, *args, **kwargs):\n \"Both dimensions must be positive\".format(rows, cols))\n \n # Matrix(2, 2, lambda i, j: i+j)\n- if len(args) == 3 and isinstance(args[2], collections.Callable):\n+ if len(args) == 3 and isinstance(args[2], collections.abc.Callable):\n op = args[2]\n flat_list = []\n for i in range(rows):\ndiff --git a/sympy/plotting/plot.py b/sympy/plotting/plot.py\nindex b3b0237d19..0b959a43a4 100644\n--- a/sympy/plotting/plot.py\n+++ b/sympy/plotting/plot.py\n@@ -25,7 +25,7 @@\n from __future__ import print_function, 
division\n \n import inspect\n-from collections import Callable\n+from collections.abc import Callable\n import warnings\n import sys\n \n" + "name": "20240829_Isoform", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -86,7 +86,8 @@\n return self[i, j]\n elif pos <= j < pos + other.cols:\n return other[i, j - pos]\n- return self[i, j - pos - other.cols]\n+ else:\n+ return self[i, j - other.cols]\n \n return self._new(self.rows, self.cols + other.cols,\n lambda i, j: entry(i, j))\n" + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\nindex 056d9bc497..31b39b82a0 100644\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -78,18 +78,23 @@ def entry(i, j):\n return self[i, j] if j < col else self[i, j + 1]\n return self._new(self.rows, self.cols - 1, entry)\n \n- def _eval_col_insert(self, pos, other):\n- cols = self.cols\n-\n- def entry(i, j):\n- if j < pos:\n- return self[i, j]\n- elif pos <= j < pos + other.cols:\n- return other[i, j - pos]\n- return self[i, j - pos - other.cols]\n+ def _eval_col_insert(self, icol, other):\n+ rows, cols = self.rows, self.cols\n+ new_matrix = []\n+\n+ for i in range(rows):\n+ new_row = []\n+ for j in range(cols + other.cols):\n+ if j < icol:\n+ new_row.append(self[i, j])\n+ elif icol <= j < icol + other.cols:\n+ new_row.append(other[i, j - icol])\n+ else:\n+ new_row.append(self[i, j - other.cols])\n+ new_matrix.append(new_row)\n \n- return self._new(self.rows, self.cols + other.cols,\n- lambda i, j: entry(i, j))\n+ flat_list = [elem for row in new_matrix for elem in row]\n+ return self._new(self.rows, self.cols + other.cols, flat_list)\n \n def _eval_col_join(self, other):\n rows = self.rows\n\n" + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\nindex 056d9bc497..77244dbe0a 100644\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -88,8 +88,18 @@ def entry(i, j):\n return other[i, j - pos]\n return self[i, j - pos - other.cols]\n \n+ # Hypothetical patch: Adjust the logic for handling column indices\n+ # This is a placeholder and may not directly resolve the issue without further context\n+ def adjusted_entry(i, j):\n+ if j < pos:\n+ return self[i, j]\n+ elif pos <= j < pos + other.cols:\n+ return other[i, j - pos]\n+ else:\n+ return self[i, j - other.cols]\n+ \n return self._new(self.rows, self.cols + other.cols,\n- lambda i, j: entry(i, j))\n+ lambda i, j: adjusted_entry(i, j))\n \n def _eval_col_join(self, other):\n rows = self.rows\n" + "name": "20240622_Lingma_Agent", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\nindex 
056d9bc497..4aa75e4d77 100644\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -86,7 +86,7 @@ def entry(i, j):\n return self[i, j]\n elif pos <= j < pos + other.cols:\n return other[i, j - pos]\n- return self[i, j - pos - other.cols]\n+ return self[i, j - other.cols]\n \n return self._new(self.rows, self.cols + other.cols,\n lambda i, j: entry(i, j))\n" + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\nindex 056d9bc497..4aa75e4d77 100644\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -86,7 +86,7 @@ def entry(i, j):\n return self[i, j]\n elif pos <= j < pos + other.cols:\n return other[i, j - pos]\n- return self[i, j - pos - other.cols]\n+ return self[i, j - other.cols]\n \n return self._new(self.rows, self.cols + other.cols,\n lambda i, j: entry(i, j))\n" + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\nindex 056d9bc497..bae0df82f8 100644\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -86,7 +86,8 @@ def entry(i, j):\n return self[i, j]\n elif pos <= j < pos + other.cols:\n return other[i, j - pos]\n- return self[i, j - pos - other.cols]\n+ else:\n+ return self[i, j - other.cols]\n \n return self._new(self.rows, self.cols + other.cols,\n lambda i, j: entry(i, j))\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "astropy__astropy-6938", - "repo": "astropy/astropy", - "base_commit": "c76af9ed6bb89bfba45b9f5bc1e635188278e2fa", - "problem_statement": "Possible bug in io.fits related to D exponents\nI came across the following code in ``fitsrec.py``:\r\n\r\n```python\r\n # Replace exponent separator in floating point numbers\r\n if 'D' in format:\r\n output_field.replace(encode_ascii('E'), encode_ascii('D'))\r\n```\r\n\r\nI think this may be incorrect because as far as I can tell ``replace`` is not an in-place operation for ``chararray`` (it returns a copy). 
Commenting out this code doesn't cause any tests to fail so I think this code isn't being tested anyway.\n", - "golden_patch": "diff --git a/astropy/io/fits/fitsrec.py b/astropy/io/fits/fitsrec.py\n--- a/astropy/io/fits/fitsrec.py\n+++ b/astropy/io/fits/fitsrec.py\n@@ -1261,7 +1261,7 @@ def _scale_back_ascii(self, col_idx, input_field, output_field):\n \n # Replace exponent separator in floating point numbers\n if 'D' in format:\n- output_field.replace(encode_ascii('E'), encode_ascii('D'))\n+ output_field[:] = output_field.replace(b'E', b'D')\n \n \n def _get_recarray_field(array, key):\n", - "expected_spans": { - "astropy/io/fits/fitsrec.py": [ - "FITS_rec", - "FITS_rec._scale_back_ascii" - ] - }, - "resolved_by": [ + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } + }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/astropy/io/fits/fitsrec.py b/astropy/io/fits/fitsrec.py\nindex 574b4073b..8890e158e 100644\n--- a/astropy/io/fits/fitsrec.py\n+++ b/astropy/io/fits/fitsrec.py\n@@ -1261,7 +1261,7 @@ class FITS_rec(np.recarray):\n \n # Replace exponent separator in floating point numbers\n if 'D' in format:\n- output_field.replace(encode_ascii('E'), encode_ascii('D'))\n+ output_field[:] = output_field.replace(encode_ascii('E'), encode_ascii('D'))\n \n \n def _get_recarray_field(array, key):\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-10297", - "repo": "scikit-learn/scikit-learn", - "base_commit": "b90661d6a46aa3619d3eec94d5281f5888add501", - "problem_statement": "linear_model.RidgeClassifierCV's Parameter store_cv_values issue\n#### Description\r\nParameter store_cv_values error on sklearn.linear_model.RidgeClassifierCV\r\n\r\n#### Steps/Code to Reproduce\r\nimport numpy as np\r\nfrom sklearn import linear_model as lm\r\n\r\n#test database\r\nn = 100\r\nx = np.random.randn(n, 30)\r\ny = np.random.normal(size = n)\r\n\r\nrr = lm.RidgeClassifierCV(alphas = np.arange(0.1, 1000, 0.1), normalize = True, \r\n store_cv_values = True).fit(x, y)\r\n\r\n#### Expected Results\r\nExpected to get the usual ridge regression model output, keeping the cross validation predictions as attribute.\r\n\r\n#### Actual Results\r\nTypeError: __init__() got an unexpected keyword argument 'store_cv_values'\r\n\r\nlm.RidgeClassifierCV actually has no parameter store_cv_values, even though some attributes depends on it.\r\n\r\n#### Versions\r\nWindows-10-10.0.14393-SP0\r\nPython 3.6.3 |Anaconda, Inc.| (default, Oct 15 2017, 03:27:45) [MSC v.1900 64 bit (AMD64)]\r\nNumPy 1.13.3\r\nSciPy 0.19.1\r\nScikit-Learn 0.19.1\r\n\r\n\nAdd store_cv_values boolean flag support to RidgeClassifierCV\nAdd store_cv_values support to RidgeClassifierCV - documentation claims that usage of this flag is possible:\n\n> cv_values_ : array, shape = [n_samples, n_alphas] or shape = [n_samples, n_responses, n_alphas], optional\n> Cross-validation values for each alpha (if **store_cv_values**=True and `cv=None`).\n\nWhile actually usage of this flag gives \n\n> TypeError: **init**() got an unexpected keyword argument 'store_cv_values'\n\n", - "golden_patch": "diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py\n--- a/sklearn/linear_model/ridge.py\n+++ b/sklearn/linear_model/ridge.py\n@@ -1212,18 +1212,18 @@ class RidgeCV(_BaseRidgeCV, RegressorMixin):\n \n store_cv_values : boolean, default=False\n Flag indicating if the cross-validation values 
corresponding to\n- each alpha should be stored in the `cv_values_` attribute (see\n- below). This flag is only compatible with `cv=None` (i.e. using\n+ each alpha should be stored in the ``cv_values_`` attribute (see\n+ below). This flag is only compatible with ``cv=None`` (i.e. using\n Generalized Cross-Validation).\n \n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_alphas] or \\\n shape = [n_samples, n_targets, n_alphas], optional\n- Cross-validation values for each alpha (if `store_cv_values=True` and \\\n- `cv=None`). After `fit()` has been called, this attribute will \\\n- contain the mean squared errors (by default) or the values of the \\\n- `{loss,score}_func` function (if provided in the constructor).\n+ Cross-validation values for each alpha (if ``store_cv_values=True``\\\n+ and ``cv=None``). After ``fit()`` has been called, this attribute \\\n+ will contain the mean squared errors (by default) or the values \\\n+ of the ``{loss,score}_func`` function (if provided in the constructor).\n \n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n@@ -1301,14 +1301,19 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n \n+ store_cv_values : boolean, default=False\n+ Flag indicating if the cross-validation values corresponding to\n+ each alpha should be stored in the ``cv_values_`` attribute (see\n+ below). This flag is only compatible with ``cv=None`` (i.e. using\n+ Generalized Cross-Validation).\n+\n Attributes\n ----------\n- cv_values_ : array, shape = [n_samples, n_alphas] or \\\n- shape = [n_samples, n_responses, n_alphas], optional\n- Cross-validation values for each alpha (if `store_cv_values=True` and\n- `cv=None`). After `fit()` has been called, this attribute will contain \\\n- the mean squared errors (by default) or the values of the \\\n- `{loss,score}_func` function (if provided in the constructor).\n+ cv_values_ : array, shape = [n_samples, n_targets, n_alphas], optional\n+ Cross-validation values for each alpha (if ``store_cv_values=True`` and\n+ ``cv=None``). 
After ``fit()`` has been called, this attribute will\n+ contain the mean squared errors (by default) or the values of the\n+ ``{loss,score}_func`` function (if provided in the constructor).\n \n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n@@ -1333,10 +1338,11 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n- normalize=False, scoring=None, cv=None, class_weight=None):\n+ normalize=False, scoring=None, cv=None, class_weight=None,\n+ store_cv_values=False):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n- scoring=scoring, cv=cv)\n+ scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n \n def fit(self, X, y, sample_weight=None):\n", - "expected_spans": { - "sklearn/linear_model/ridge.py": [ - "RidgeCV", - "RidgeClassifierCV" - ] - }, - "resolved_by": [ + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } + }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py\nindex 911c74ae3..f486e8074 100644\n--- a/sklearn/linear_model/ridge.py\n+++ b/sklearn/linear_model/ridge.py\n@@ -1301,6 +1301,13 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n \n+ store_cv_values : boolean, default=False\n+ Flag indicating if the cross-validation values should be stored in the\n+ `cv_values_` attribute (see below). If set to True, the cross-validation\n+ values are available after fitting and can be used to evaluate the model.\n+ Note that this parameter is only effective when `cv=None` (the default).\n+\n+\n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_alphas] or \\\n@@ -1333,11 +1340,13 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n- normalize=False, scoring=None, cv=None, class_weight=None):\n+ normalize=False, scoring=None, cv=None, class_weight=None,\n+ store_cv_values=False):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n- scoring=scoring, cv=cv)\n+ scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n+ self.store_cv_values = store_cv_values\n \n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the ridge classifier.\n" + "name": "20240617_factory_code_droid", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sklearn/linear_model/ridge.py\n+++ b/sklearn/linear_model/ridge.py\n@@ -1330,16 +1330,17 @@\n -----\n For multi-class classification, n_class classifiers are trained in\n a one-versus-all approach. 
Concretely, this is implemented by taking\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n- normalize=False, scoring=None, cv=None, class_weight=None):\n+ normalize=False, scoring=None, cv=None, class_weight=None,\n+ store_cv_values=False):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n- scoring=scoring, cv=cv)\n+ scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n \n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the ridge classifier.\n \n Parameters\n" + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "requests/cookies.py": [ + "docstring", + "RequestsCookieJar" + ], + "requests/models.py": [ + "docstring", + "RequestHooksMixin.register_hook" + ], + "requests/packages/urllib3/_collections.py": [ + "imports" + ], + "requests/sessions.py": [ + "docstring", + "imports", + "Session.request" + ], + "requests/structures.py": [ + "docstring", + "CaseInsensitiveDict" + ], + "requests/utils.py": [ + "docstring", + "to_key_val_list" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "docstring", + "imports", + "Session.request" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py\nindex 911c74ae3..b1fccc2e9 100644\n--- a/sklearn/linear_model/ridge.py\n+++ b/sklearn/linear_model/ridge.py\n@@ -1333,10 +1333,11 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n- normalize=False, scoring=None, cv=None, class_weight=None):\n+ normalize=False, scoring=None, cv=None, class_weight=None,\n+ store_cv_values=False):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n- scoring=scoring, cv=cv)\n+ scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n \n def fit(self, X, y, sample_weight=None):\n" + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py\nindex 911c74ae3..c3e614256 100644\n--- a/sklearn/linear_model/ridge.py\n+++ b/sklearn/linear_model/ridge.py\n@@ -1333,10 +1333,11 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n- normalize=False, scoring=None, cv=None, class_weight=None):\n+ normalize=False, scoring=None, cv=None, class_weight=None, store_cv_values=False):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n scoring=scoring, cv=cv)\n+ self.store_cv_values = store_cv_values\n self.class_weight = class_weight\n \n def fit(self, X, y, sample_weight=None):\n" + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/sklearn/linear_model/ridge.py 
b/sklearn/linear_model/ridge.py\nindex 911c74ae3..9256551ec 100644\n--- a/sklearn/linear_model/ridge.py\n+++ b/sklearn/linear_model/ridge.py\n@@ -1333,11 +1333,12 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n- normalize=False, scoring=None, cv=None, class_weight=None):\n+ normalize=False, scoring=None, cv=None, class_weight=None, store_cv_values=False):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n- scoring=scoring, cv=cv)\n+ scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n+ self.store_cv_values = store_cv_values\n \n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the ridge classifier.\n" + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py\nindex 911c74ae3..b1fccc2e9 100644\n--- a/sklearn/linear_model/ridge.py\n+++ b/sklearn/linear_model/ridge.py\n@@ -1333,10 +1333,11 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n- normalize=False, scoring=None, cv=None, class_weight=None):\n+ normalize=False, scoring=None, cv=None, class_weight=None,\n+ store_cv_values=False):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n- scoring=scoring, cv=cv)\n+ scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n \n def fit(self, X, y, sample_weight=None):\n" + "name": "20240811_gru", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py\nindex 911c74ae3..f486e8074 100644\n--- a/sklearn/linear_model/ridge.py\n+++ b/sklearn/linear_model/ridge.py\n@@ -1301,6 +1301,13 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n \n+ store_cv_values : boolean, default=False\n+ Flag indicating if the cross-validation values should be stored in the\n+ `cv_values_` attribute (see below). 
If set to True, the cross-validation\n+ values are available after fitting and can be used to evaluate the model.\n+ Note that this parameter is only effective when `cv=None` (the default).\n+\n+\n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_alphas] or \\\n@@ -1333,11 +1340,13 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n- normalize=False, scoring=None, cv=None, class_weight=None):\n+ normalize=False, scoring=None, cv=None, class_weight=None,\n+ store_cv_values=False):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n- scoring=scoring, cv=cv)\n+ scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n+ self.store_cv_values = store_cv_values\n \n def fit(self, X, y, sample_weight=None):\n \"\"\"Fit the ridge classifier.\n" + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "requests/sessions.py": [ + "Session.request" + ] + }, + "alternative_spans": { + "requests/sessions.py": [ + "Session.request" + ] + } } ], "alternative_spans": [ { - "run_name": "20240402_sweagent_gpt4", + "run_name": "20240523_aider", "spans": { - "sklearn/linear_model/ridge.py": [ - "RidgeClassifierCV" + "requests/sessions.py": [ + "Session.request" ] } }, { - "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "run_name": "20240524_opencsg_starship_gpt4", "spans": { - "sklearn/linear_model/ridge.py": [ - "RidgeClassifierCV" + "requests/sessions.py": [ + "Session.request" ] } }, { - "run_name": "20240523_aider", + "run_name": "20240630_agentless_gpt4o", "spans": { - "sklearn/linear_model/ridge.py": [ - "RidgeClassifierCV" + "requests/sessions.py": [ + "Session.request" ] } }, { - "run_name": "20240527_marscode-agent-dev", + "run_name": "20240828_autose_mixed", "spans": { - "sklearn/linear_model/ridge.py": [ - "RidgeClassifierCV" + "requests/sessions.py": [ + "Session.request" ] } }, { - "run_name": "20240530_autocoderover-v20240408", + "run_name": "20240615_appmap-navie_gpt4o", "spans": { - "sklearn/linear_model/ridge.py": [ - "RidgeClassifierCV" + "requests/sessions.py": [ + "Session.request" ] } }, { - "run_name": "20240609_moatless_gpt4o", + "run_name": "20240530_autocoderover-v20240408", "spans": { - "sklearn/linear_model/ridge.py": [ - "RidgeClassifierCV" + "requests/sessions.py": [ + "Session.request" ] } }, { - "run_name": "20240612_IBM_Research_Agent101", + "run_name": "20240706_sima_gpt4o", "spans": { - "sklearn/linear_model/ridge.py": [ - "RidgeClassifierCV" + "requests/sessions.py": [ + "Session.request" ] } - } - ] - }, - { - "instance_id": "sympy__sympy-13773", - "repo": "sympy/sympy", - "base_commit": "7121bdf1facdd90d05b6994b4c2e5b2865a4638a", - "problem_statement": "@ (__matmul__) should fail if one argument is not a matrix\n```\r\n>>> A = Matrix([[1, 2], [3, 4]])\r\n>>> B = Matrix([[2, 3], [1, 2]])\r\n>>> A@B\r\nMatrix([\r\n[ 4, 7],\r\n[10, 17]])\r\n>>> 2@B\r\nMatrix([\r\n[4, 6],\r\n[2, 4]])\r\n```\r\n\r\nRight now `@` (`__matmul__`) just copies `__mul__`, but it should actually only work if the multiplication is actually a matrix multiplication. 
\r\n\r\nThis is also how NumPy works\r\n\r\n```\r\n>>> import numpy as np\r\n>>> a = np.array([[1, 2], [3, 4]])\r\n>>> 2*a\r\narray([[2, 4],\r\n [6, 8]])\r\n>>> 2@a\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\nValueError: Scalar operands are not allowed, use '*' instead\r\n```\n", - "golden_patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -1973,6 +1973,10 @@ def __div__(self, other):\n \n @call_highest_priority('__rmatmul__')\n def __matmul__(self, other):\n+ other = _matrixify(other)\n+ if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):\n+ return NotImplemented\n+\n return self.__mul__(other)\n \n @call_highest_priority('__rmul__')\n@@ -2066,6 +2070,10 @@ def __radd__(self, other):\n \n @call_highest_priority('__matmul__')\n def __rmatmul__(self, other):\n+ other = _matrixify(other)\n+ if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):\n+ return NotImplemented\n+\n return self.__rmul__(other)\n \n @call_highest_priority('__mul__')\n", - "expected_spans": { - "sympy/matrices/common.py": [ - "MatrixArithmetic.__matmul__", - "MatrixArithmetic.__rmatmul__" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13895", - "repo": "sympy/sympy", - "base_commit": "4da0b64558e9551a11a99bccc63557ba34f50c58", - "problem_statement": "(-x/4 - S(1)/12)**x - 1 simplifies to an inequivalent expression\n >>> from sympy import *\r\n >>> x = Symbol('x')\r\n >>> e = (-x/4 - S(1)/12)**x - 1\r\n >>> e\r\n (-x/4 - 1/12)**x - 1\r\n >>> f = simplify(e)\r\n >>> f\r\n 12**(-x)*(-12**x + (-3*x - 1)**x)\r\n >>> a = S(9)/5\r\n >>> simplify(e.subs(x,a))\r\n -1 - 32*15**(1/5)*2**(2/5)/225\r\n >>> simplify(f.subs(x,a))\r\n -1 - 32*(-1)**(4/5)*60**(1/5)/225\r\n >>> N(e.subs(x,a))\r\n -1.32255049319339\r\n >>> N(f.subs(x,a))\r\n -0.739051169462523 - 0.189590423018741*I\r\n\r\n\n", - "golden_patch": "diff --git a/sympy/core/numbers.py b/sympy/core/numbers.py\n--- a/sympy/core/numbers.py\n+++ b/sympy/core/numbers.py\n@@ -2248,11 +2248,9 @@ def _eval_power(self, expt):\n if p is not False:\n dict = {p[0]: p[1]}\n else:\n- dict = Integer(self).factors(limit=2**15)\n+ dict = Integer(b_pos).factors(limit=2**15)\n \n # now process the dict of factors\n- if self.is_negative:\n- dict[-1] = 1\n out_int = 1 # integer part\n out_rad = 1 # extracted radicals\n sqr_int = 1\n@@ -2282,10 +2280,12 @@ def _eval_power(self, expt):\n break\n for k, v in sqr_dict.items():\n sqr_int *= k**(v//sqr_gcd)\n- if sqr_int == self and out_int == 1 and out_rad == 1:\n+ if sqr_int == b_pos and out_int == 1 and out_rad == 1:\n result = None\n else:\n result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q))\n+ if self.is_negative:\n+ result *= Pow(S.NegativeOne, expt)\n return result\n \n def _eval_is_prime(self):\n", - "expected_spans": { - "sympy/core/numbers.py": [ - "Integer._eval_power" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-13915", - "repo": "sympy/sympy", - "base_commit": "5c1644ff85e15752f9f8721bc142bfbf975e7805", - "problem_statement": "Issue with a substitution that leads to an undefined expression\n```\r\nPython 3.6.4 |Anaconda custom (64-bit)| (default, Dec 21 2017, 15:39:08) \r\nType 'copyright', 'credits' or 'license' for more information\r\nIPython 6.2.1 -- An enhanced Interactive Python. Type '?' 
for help.\r\n\r\nIn [1]: from sympy import *\r\n\r\nIn [2]: a,b = symbols('a,b')\r\n\r\nIn [3]: r = (1/(a+b) + 1/(a-b))/(1/(a+b) - 1/(a-b))\r\n\r\nIn [4]: r.subs(b,a)\r\nOut[4]: 1\r\n\r\nIn [6]: import sympy\r\n\r\nIn [7]: sympy.__version__\r\nOut[7]: '1.1.1'\r\n```\r\n\r\nIf b is substituted by a, r is undefined. It is possible to calculate the limit\r\n`r.limit(b,a) # -1`\r\n\r\nBut whenever a subexpression of r is undefined, r itself is undefined.\n", - "golden_patch": "diff --git a/sympy/core/mul.py b/sympy/core/mul.py\n--- a/sympy/core/mul.py\n+++ b/sympy/core/mul.py\n@@ -423,6 +423,11 @@ def _gather(c_powers):\n changed = False\n for b, e in c_powers:\n if e.is_zero:\n+ # canceling out infinities yields NaN\n+ if (b.is_Add or b.is_Mul) and any(infty in b.args\n+ for infty in (S.ComplexInfinity, S.Infinity,\n+ S.NegativeInfinity)):\n+ return [S.NaN], [], None\n continue\n if e is S.One:\n if b.is_Number:\n", - "expected_spans": { - "sympy/core/mul.py": [ - "Mul.flatten" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-10508", - "repo": "scikit-learn/scikit-learn", - "base_commit": "c753b77ac49e72ebc0fe5e3c2369fe628f975017", - "problem_statement": "LabelEncoder transform fails for empty lists (for certain inputs)\nPython 3.6.3, scikit_learn 0.19.1\r\n\r\nDepending on which datatypes were used to fit the LabelEncoder, transforming empty lists works or not. Expected behavior would be that empty arrays are returned in both cases.\r\n\r\n```python\r\n>>> from sklearn.preprocessing import LabelEncoder\r\n>>> le = LabelEncoder()\r\n>>> le.fit([1,2])\r\nLabelEncoder()\r\n>>> le.transform([])\r\narray([], dtype=int64)\r\n>>> le.fit([\"a\",\"b\"])\r\nLabelEncoder()\r\n>>> le.transform([])\r\nTraceback (most recent call last):\r\n File \"[...]\\Python36\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 57, in _wrapfunc\r\n return getattr(obj, method)(*args, **kwds)\r\nTypeError: Cannot cast array data from dtype('float64') to dtype('\", line 1, in \r\n File \"[...]\\Python36\\lib\\site-packages\\sklearn\\preprocessing\\label.py\", line 134, in transform\r\n return np.searchsorted(self.classes_, y)\r\n File \"[...]\\Python36\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 1075, in searchsorted\r\n return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)\r\n File \"[...]\\Python36\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 67, in _wrapfunc\r\n return _wrapit(obj, method, *args, **kwds)\r\n File \"[...]\\Python36\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 47, in _wrapit\r\n result = getattr(asarray(obj), method)(*args, **kwds)\r\nTypeError: Cannot cast array data from dtype('float64') to dtype('>> a = Symbol('a', integer=True, positive=True)\r\n>>> e = (-a)**x * a**(-x)\r\n>>> f = simplify(e)\r\n>>> print(e)\r\na**(-x)*(-a)**x\r\n>>> print(f)\r\n(-1)**x\r\n>>> t = -S(10)/3\r\n>>> n1 = e.subs(x,t)\r\n>>> n2 = f.subs(x,t)\r\n>>> print(N(n1))\r\n-0.5 + 0.866025403784439*I\r\n>>> print(N(n2))\r\n-0.5 + 0.866025403784439*I\r\n```\r\n\r\nvs\r\n\r\n```\r\n>>> a = S(2)\r\n>>> e = (-a)**x * a**(-x)\r\n>>> f = simplify(e)\r\n>>> print(e)\r\n(-2)**x*2**(-x)\r\n>>> print(f)\r\n(-1)**x\r\n>>> t = -S(10)/3\r\n>>> n1 = e.subs(x,t)\r\n>>> n2 = f.subs(x,t)\r\n>>> print(N(n1))\r\n0.5 - 0.866025403784439*I\r\n>>> print(N(n2))\r\n-0.5 + 0.866025403784439*I\r\n```\n", - "golden_patch": "diff --git a/sympy/core/numbers.py b/sympy/core/numbers.py\n--- a/sympy/core/numbers.py\n+++ b/sympy/core/numbers.py\n@@ 
-1678,11 +1678,7 @@ def _eval_power(self, expt):\n if (ne is S.One):\n return Rational(self.q, self.p)\n if self.is_negative:\n- if expt.q != 1:\n- return -(S.NegativeOne)**((expt.p % expt.q) /\n- S(expt.q))*Rational(self.q, -self.p)**ne\n- else:\n- return S.NegativeOne**ne*Rational(self.q, -self.p)**ne\n+ return S.NegativeOne**expt*Rational(self.q, -self.p)**ne\n else:\n return Rational(self.q, self.p)**ne\n if expt is S.Infinity: # -oo already caught by test for negative\n@@ -2223,11 +2219,7 @@ def _eval_power(self, expt):\n # invert base and change sign on exponent\n ne = -expt\n if self.is_negative:\n- if expt.q != 1:\n- return -(S.NegativeOne)**((expt.p % expt.q) /\n- S(expt.q))*Rational(1, -self)**ne\n- else:\n- return (S.NegativeOne)**ne*Rational(1, -self)**ne\n+ return S.NegativeOne**expt*Rational(1, -self)**ne\n else:\n return Rational(1, self.p)**ne\n # see if base is a perfect root, sqrt(4) --> 2\n", - "expected_spans": { - "sympy/core/numbers.py": [ - "Rational._eval_power", - "Integer._eval_power" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-14308", - "repo": "sympy/sympy", - "base_commit": "fb536869fb7aa28b2695ad7a3b70949926b291c4", - "problem_statement": "vectors break pretty printing\n```py\r\nIn [1]: from sympy.vector import *\r\n\r\nIn [2]: e = CoordSysCartesian('e')\r\n\r\nIn [3]: (x/y)**t*e.j\r\nOut[3]:\r\n\u239b t\u239e e_j\r\n\u239c\u239bx\u239e e_j \u239f\r\n\u239c\u239c\u2500\u239f \u239f\r\n\u239d\u239dy\u23a0 \u23a0\r\n```\r\n\r\nAlso, when it does print correctly, the baseline is wrong (it should be centered). \n", - "golden_patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -931,26 +931,49 @@ def _print_BasisDependent(self, expr):\n #Fixing the newlines\n lengths = []\n strs = ['']\n+ flag = []\n for i, partstr in enumerate(o1):\n+ flag.append(0)\n # XXX: What is this hack?\n if '\\n' in partstr:\n tempstr = partstr\n tempstr = tempstr.replace(vectstrs[i], '')\n- tempstr = tempstr.replace(u'\\N{RIGHT PARENTHESIS UPPER HOOK}',\n- u'\\N{RIGHT PARENTHESIS UPPER HOOK}'\n- + ' ' + vectstrs[i])\n+ if u'\\N{right parenthesis extension}' in tempstr: # If scalar is a fraction\n+ for paren in range(len(tempstr)):\n+ flag[i] = 1\n+ if tempstr[paren] == u'\\N{right parenthesis extension}':\n+ tempstr = tempstr[:paren] + u'\\N{right parenthesis extension}'\\\n+ + ' ' + vectstrs[i] + tempstr[paren + 1:]\n+ break\n+ elif u'\\N{RIGHT PARENTHESIS LOWER HOOK}' in tempstr:\n+ flag[i] = 1\n+ tempstr = tempstr.replace(u'\\N{RIGHT PARENTHESIS LOWER HOOK}',\n+ u'\\N{RIGHT PARENTHESIS LOWER HOOK}'\n+ + ' ' + vectstrs[i])\n+ else:\n+ tempstr = tempstr.replace(u'\\N{RIGHT PARENTHESIS UPPER HOOK}',\n+ u'\\N{RIGHT PARENTHESIS UPPER HOOK}'\n+ + ' ' + vectstrs[i])\n o1[i] = tempstr\n+\n o1 = [x.split('\\n') for x in o1]\n- n_newlines = max([len(x) for x in o1])\n- for parts in o1:\n- lengths.append(len(parts[0]))\n+ n_newlines = max([len(x) for x in o1]) # Width of part in its pretty form\n+\n+ if 1 in flag: # If there was a fractional scalar\n+ for i, parts in enumerate(o1):\n+ if len(parts) == 1: # If part has no newline\n+ parts.insert(0, ' ' * (len(parts[0])))\n+ flag[i] = 1\n+\n+ for i, parts in enumerate(o1):\n+ lengths.append(len(parts[flag[i]]))\n for j in range(n_newlines):\n if j+1 <= len(parts):\n if j >= len(strs):\n strs.append(' ' * (sum(lengths[:-1]) +\n 3*(len(lengths)-1)))\n- if j == 0:\n- strs[0] += 
parts[0] + ' + '\n+ if j == flag[i]:\n+ strs[flag[i]] += parts[flag[i]] + ' + '\n else:\n strs[j] += parts[j] + ' '*(lengths[-1] -\n len(parts[j])+\n", - "expected_spans": { - "sympy/printing/pretty/pretty.py": [ - "PrettyPrinter._print_BasisDependent" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-14317", - "repo": "sympy/sympy", - "base_commit": "fb536869fb7aa28b2695ad7a3b70949926b291c4", - "problem_statement": "LaTeX printer does not use the same order of monomials as pretty and str \nWhen printing a Poly, the str and pretty printers use the logical order of monomials, from highest to lowest degrees. But latex printer does not. \r\n```\r\n>>> var('a b c x')\r\n>>> p = Poly([a, 1, b, 2, c, 3], x)\r\n>>> p\r\nPoly(a*x**5 + x**4 + b*x**3 + 2*x**2 + c*x + 3, x, domain='ZZ[a,b,c]')\r\n>>> pretty(p)\r\n\"Poly(a*x**5 + x**4 + b*x**3 + 2*x**2 + c*x + 3, x, domain='ZZ[a,b,c]')\"\r\n>>> latex(p)\r\n'\\\\operatorname{Poly}{\\\\left( a x^{5} + b x^{3} + c x + x^{4} + 2 x^{2} + 3, x, domain=\\\\mathbb{Z}\\\\left[a, b, c\\\\right] \\\\right)}'\r\n```\n", - "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -1813,7 +1813,50 @@ def _print_PolynomialRingBase(self, expr):\n \n def _print_Poly(self, poly):\n cls = poly.__class__.__name__\n- expr = self._print(poly.as_expr())\n+ terms = []\n+ for monom, coeff in poly.terms():\n+ s_monom = ''\n+ for i, exp in enumerate(monom):\n+ if exp > 0:\n+ if exp == 1:\n+ s_monom += self._print(poly.gens[i])\n+ else:\n+ s_monom += self._print(pow(poly.gens[i], exp))\n+\n+ if coeff.is_Add:\n+ if s_monom:\n+ s_coeff = r\"\\left(%s\\right)\" % self._print(coeff)\n+ else:\n+ s_coeff = self._print(coeff)\n+ else:\n+ if s_monom:\n+ if coeff is S.One:\n+ terms.extend(['+', s_monom])\n+ continue\n+\n+ if coeff is S.NegativeOne:\n+ terms.extend(['-', s_monom])\n+ continue\n+\n+ s_coeff = self._print(coeff)\n+\n+ if not s_monom:\n+ s_term = s_coeff\n+ else:\n+ s_term = s_coeff + \" \" + s_monom\n+\n+ if s_term.startswith('-'):\n+ terms.extend(['-', s_term[1:]])\n+ else:\n+ terms.extend(['+', s_term])\n+\n+ if terms[0] in ['-', '+']:\n+ modifier = terms.pop(0)\n+\n+ if modifier == '-':\n+ terms[0] = '-' + terms[0]\n+\n+ expr = ' '.join(terms)\n gens = list(map(self._print, poly.gens))\n domain = \"domain=%s\" % self._print(poly.get_domain())\n \n", - "expected_spans": { - "sympy/printing/latex.py": [ - "LatexPrinter._print_FourierSeries" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-14396", - "repo": "sympy/sympy", - "base_commit": "f35ad6411f86a15dd78db39c29d1e5291f66f9b5", - "problem_statement": "Poly(domain='RR[y,z]') doesn't work\n``` py\nIn [14]: Poly(1.2*x*y*z, x)\nOut[14]: Poly(1.2*y*z*x, x, domain='RR[y,z]')\n\nIn [15]: Poly(1.2*x*y*z, x, domain='RR[y,z]')\n---------------------------------------------------------------------------\nOptionError Traceback (most recent call last)\n in ()\n----> 1 Poly(1.2*x*y*z, x, domain='RR[y,z]')\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polytools.py in __new__(cls, rep, *gens, **args)\n 69 def __new__(cls, rep, *gens, **args):\n 70 \"\"\"Create a new polynomial instance out of something useful. 
\"\"\"\n---> 71 opt = options.build_options(gens, args)\n 72\n 73 if 'order' in opt:\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polyoptions.py in build_options(gens, args)\n 718\n 719 if len(args) != 1 or 'opt' not in args or gens:\n--> 720 return Options(gens, args)\n 721 else:\n 722 return args['opt']\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polyoptions.py in __init__(self, gens, args, flags, strict)\n 151 self[option] = cls.preprocess(value)\n 152\n--> 153 preprocess_options(args)\n 154\n 155 for key, value in dict(defaults).items():\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polyoptions.py in preprocess_options(args)\n 149\n 150 if value is not None:\n--> 151 self[option] = cls.preprocess(value)\n 152\n 153 preprocess_options(args)\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polyoptions.py in preprocess(cls, domain)\n 480 return sympy.polys.domains.QQ.algebraic_field(*gens)\n 481\n--> 482 raise OptionError('expected a valid domain specification, got %s' % domain)\n 483\n 484 @classmethod\n\nOptionError: expected a valid domain specification, got RR[y,z]\n```\n\nAlso, the wording of error message could be improved\n\n", - "golden_patch": "diff --git a/sympy/polys/polyoptions.py b/sympy/polys/polyoptions.py\n--- a/sympy/polys/polyoptions.py\n+++ b/sympy/polys/polyoptions.py\n@@ -405,7 +405,7 @@ class Domain(with_metaclass(OptionType, Option)):\n _re_realfield = re.compile(r\"^(R|RR)(_(\\d+))?$\")\n _re_complexfield = re.compile(r\"^(C|CC)(_(\\d+))?$\")\n _re_finitefield = re.compile(r\"^(FF|GF)\\((\\d+)\\)$\")\n- _re_polynomial = re.compile(r\"^(Z|ZZ|Q|QQ)\\[(.+)\\]$\")\n+ _re_polynomial = re.compile(r\"^(Z|ZZ|Q|QQ|R|RR|C|CC)\\[(.+)\\]$\")\n _re_fraction = re.compile(r\"^(Z|ZZ|Q|QQ)\\((.+)\\)$\")\n _re_algebraic = re.compile(r\"^(Q|QQ)\\<(.+)\\>$\")\n \n@@ -459,8 +459,12 @@ def preprocess(cls, domain):\n \n if ground in ['Z', 'ZZ']:\n return sympy.polys.domains.ZZ.poly_ring(*gens)\n- else:\n+ elif ground in ['Q', 'QQ']:\n return sympy.polys.domains.QQ.poly_ring(*gens)\n+ elif ground in ['R', 'RR']:\n+ return sympy.polys.domains.RR.poly_ring(*gens)\n+ else:\n+ return sympy.polys.domains.CC.poly_ring(*gens)\n \n r = cls._re_fraction.match(domain)\n \n", - "expected_spans": { - "sympy/polys/polyoptions.py": [ - "Domain", - "Domain.preprocess" - ] - }, - "resolved_by": [ - { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sympy/polys/polyoptions.py b/sympy/polys/polyoptions.py\nindex 6e504a3057..58cfa108b9 100644\n--- a/sympy/polys/polyoptions.py\n+++ b/sympy/polys/polyoptions.py\n@@ -409,6 +409,9 @@ class Domain(with_metaclass(OptionType, Option)):\n _re_fraction = re.compile(r\"^(Z|ZZ|Q|QQ)\\((.+)\\)$\")\n _re_algebraic = re.compile(r\"^(Q|QQ)\\<(.+)\\>$\")\n \n+ _re_realfield_with_ext = re.compile(r\"^(R|RR)\\[(.+)\\]$\")\n+ _re_complexfield_with_ext = re.compile(r\"^(C|CC)\\[(.+)\\]$\")\n+\n @classmethod\n def preprocess(cls, domain):\n if isinstance(domain, sympy.polys.domains.Domain):\n@@ -425,6 +428,20 @@ def preprocess(cls, domain):\n if domain == 'EX':\n return sympy.polys.domains.EX\n \n+ r = cls._re_realfield_with_ext.match(domain)\n+\n+ if r is not None:\n+ base, gens = r.groups()\n+ gens = list(map(sympify, gens.split(',')))\n+ return sympy.polys.domains.RR.poly_ring(*gens)\n+\n+ r = cls._re_complexfield_with_ext.match(domain)\n+\n+ if r is not None:\n+ base, gens = r.groups()\n+ gens = list(map(sympify, gens.split(',')))\n+ return 
sympy.polys.domains.CC.poly_ring(*gens)\n+\n r = cls._re_realfield.match(domain)\n \n if r is not None:\n" - } - ], - "alternative_spans": [ + }, { - "run_name": "20240527_marscode-agent-dev", + "run_name": "20241016_IBM-SWE-1.0", "spans": { - "sympy/polys/polyoptions.py": [ - "Domain.preprocess" + "requests/sessions.py": [ + "Session.request" ] } - } - ] - }, - { - "instance_id": "scikit-learn__scikit-learn-10949", - "repo": "scikit-learn/scikit-learn", - "base_commit": "3b5abf76597ce6aff76192869f92647c1b5259e7", - "problem_statement": "warn_on_dtype with DataFrame\n#### Description\r\n\r\n``warn_on_dtype`` has no effect when input is a pandas ``DataFrame``\r\n\r\n#### Steps/Code to Reproduce\r\n```python\r\nfrom sklearn.utils.validation import check_array\r\nimport pandas as pd\r\ndf = pd.DataFrame([[1, 2, 3], [2, 3, 4]], dtype=object)\r\nchecked = check_array(df, warn_on_dtype=True)\r\n```\r\n\r\n#### Expected result: \r\n\r\n```python-traceback\r\nDataConversionWarning: Data with input dtype object was converted to float64.\r\n```\r\n\r\n#### Actual Results\r\nNo warning is thrown\r\n\r\n#### Versions\r\nLinux-4.4.0-116-generic-x86_64-with-debian-stretch-sid\r\nPython 3.6.3 |Anaconda, Inc.| (default, Nov 3 2017, 19:19:16) \r\n[GCC 7.2.0]\r\nNumPy 1.13.1\r\nSciPy 0.19.1\r\nScikit-Learn 0.20.dev0\r\nPandas 0.21.0\r\n\nwarn_on_dtype with DataFrame\n#### Description\r\n\r\n``warn_on_dtype`` has no effect when input is a pandas ``DataFrame``\r\n\r\n#### Steps/Code to Reproduce\r\n```python\r\nfrom sklearn.utils.validation import check_array\r\nimport pandas as pd\r\ndf = pd.DataFrame([[1, 2, 3], [2, 3, 4]], dtype=object)\r\nchecked = check_array(df, warn_on_dtype=True)\r\n```\r\n\r\n#### Expected result: \r\n\r\n```python-traceback\r\nDataConversionWarning: Data with input dtype object was converted to float64.\r\n```\r\n\r\n#### Actual Results\r\nNo warning is thrown\r\n\r\n#### Versions\r\nLinux-4.4.0-116-generic-x86_64-with-debian-stretch-sid\r\nPython 3.6.3 |Anaconda, Inc.| (default, Nov 3 2017, 19:19:16) \r\n[GCC 7.2.0]\r\nNumPy 1.13.1\r\nSciPy 0.19.1\r\nScikit-Learn 0.20.dev0\r\nPandas 0.21.0\r\n\n", - "golden_patch": "diff --git a/sklearn/utils/validation.py b/sklearn/utils/validation.py\n--- a/sklearn/utils/validation.py\n+++ b/sklearn/utils/validation.py\n@@ -466,6 +466,12 @@ def check_array(array, accept_sparse=False, accept_large_sparse=True,\n # not a data type (e.g. a column named dtype in a pandas DataFrame)\n dtype_orig = None\n \n+ # check if the object contains several dtypes (typically a pandas\n+ # DataFrame), and store them. 
If not, store None.\n+ dtypes_orig = None\n+ if hasattr(array, \"dtypes\") and hasattr(array, \"__array__\"):\n+ dtypes_orig = np.array(array.dtypes)\n+\n if dtype_numeric:\n if dtype_orig is not None and dtype_orig.kind == \"O\":\n # if input is object, convert to float.\n@@ -581,6 +587,16 @@ def check_array(array, accept_sparse=False, accept_large_sparse=True,\n if copy and np.may_share_memory(array, array_orig):\n array = np.array(array, dtype=dtype, order=order)\n \n+ if (warn_on_dtype and dtypes_orig is not None and\n+ {array.dtype} != set(dtypes_orig)):\n+ # if there was at the beginning some other types than the final one\n+ # (for instance in a DataFrame that can contain several dtypes) then\n+ # some data must have been converted\n+ msg = (\"Data with input dtype %s were all converted to %s%s.\"\n+ % (', '.join(map(str, sorted(set(dtypes_orig)))), array.dtype,\n+ context))\n+ warnings.warn(msg, DataConversionWarning, stacklevel=3)\n+\n return array\n \n \n", - "expected_spans": { - "sklearn/utils/validation.py": [ - "check_array" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-11040", - "repo": "scikit-learn/scikit-learn", - "base_commit": "96a02f3934952d486589dddd3f00b40d5a5ab5f2", - "problem_statement": "Missing parameter validation in Neighbors estimator for float n_neighbors\n```python\r\nfrom sklearn.neighbors import NearestNeighbors\r\nfrom sklearn.datasets import make_blobs\r\nX, y = make_blobs()\r\nneighbors = NearestNeighbors(n_neighbors=3.)\r\nneighbors.fit(X)\r\nneighbors.kneighbors(X)\r\n```\r\n```\r\n~/checkout/scikit-learn/sklearn/neighbors/binary_tree.pxi in sklearn.neighbors.kd_tree.NeighborsHeap.__init__()\r\n\r\nTypeError: 'float' object cannot be interpreted as an integer\r\n```\r\nThis should be caught earlier and a more helpful error message should be raised (or we could be lenient and cast to integer, but I think a better error might be better).\r\n\r\nWe need to make sure that \r\n```python\r\nneighbors.kneighbors(X, n_neighbors=3.)\r\n```\r\nalso works.\n", - "golden_patch": "diff --git a/sklearn/neighbors/base.py b/sklearn/neighbors/base.py\n--- a/sklearn/neighbors/base.py\n+++ b/sklearn/neighbors/base.py\n@@ -258,6 +258,12 @@ def _fit(self, X):\n \"Expected n_neighbors > 0. Got %d\" %\n self.n_neighbors\n )\n+ else:\n+ if not np.issubdtype(type(self.n_neighbors), np.integer):\n+ raise TypeError(\n+ \"n_neighbors does not take %s value, \"\n+ \"enter integer value\" %\n+ type(self.n_neighbors))\n \n return self\n \n@@ -327,6 +333,17 @@ class from an array representing our data set and ask who's\n \n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n+ elif n_neighbors <= 0:\n+ raise ValueError(\n+ \"Expected n_neighbors > 0. 
Got %d\" %\n+ n_neighbors\n+ )\n+ else:\n+ if not np.issubdtype(type(n_neighbors), np.integer):\n+ raise TypeError(\n+ \"n_neighbors does not take %s value, \"\n+ \"enter integer value\" %\n+ type(n_neighbors))\n \n if X is not None:\n query_is_train = False\n", - "expected_spans": { - "sklearn/neighbors/base.py": [] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-14774", - "repo": "sympy/sympy", - "base_commit": "8fc63c2d71752389a44367b8ef4aba8a91af6a45", - "problem_statement": "Latex printer does not support full inverse trig function names for acsc and asec\nFor example\r\n`latex(asin(x), inv_trig_style=\"full\")` works as expected returning `'\\\\arcsin{\\\\left (x \\\\right )}'`\r\nBut `latex(acsc(x), inv_trig_style=\"full\")` gives `'\\\\operatorname{acsc}{\\\\left (x \\\\right )}'` instead of `'\\\\operatorname{arccsc}{\\\\left (x \\\\right )}'`\r\n\r\nA fix seems to be to change line 743 of sympy/printing/latex.py from\r\n`inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]` to\r\n`inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]`\n", - "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n", - "expected_spans": { - "sympy/printing/latex.py": [ - "LatexPrinter._print_Function" - ] - }, - "resolved_by": [ + }, { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex a3ead157c1..f3b7669e96 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n" + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex a3ead157c1..f3b7669e96 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n" + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -737,13 +737,13 @@\n inv_trig_power_case = False\n # If it is applicable to fold the argument brackets\n can_fold_brackets = self._settings['fold_func_brackets'] and \\\n 
len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n if inv_trig_style == \"abbreviated\":\n func = func\n elif inv_trig_style == \"full\":\n" + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/sympy/parsing/sympy_parser.py b/sympy/parsing/sympy_parser.py\nindex 3e0cab7d9b..0b12c6ac17 100644\n--- a/sympy/parsing/sympy_parser.py\n+++ b/sympy/parsing/sympy_parser.py\n@@ -994,7 +994,7 @@ def visit_BinOp(self, node):\n right = ast.Call(\n func=ast.Name(id='Mul', ctx=ast.Load()),\n args=[ast.UnaryOp(op=ast.USub(), operand=ast.Num(1)), right],\n- keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],\n+ keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False))],\n starargs=None,\n kwargs=None\n )\n@@ -1005,7 +1005,7 @@ def visit_BinOp(self, node):\n left = ast.Call(\n func=ast.Name(id='Pow', ctx=ast.Load()),\n args=[left, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))],\n- keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],\n+ keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False))],\n starargs=None,\n kwargs=None\n )\n@@ -1013,7 +1013,7 @@ def visit_BinOp(self, node):\n right = ast.Call(\n func=ast.Name(id='Pow', ctx=ast.Load()),\n args=[right, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))],\n- keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],\n+ keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False))],\n starargs=None,\n kwargs=None\n )\n@@ -1021,7 +1021,7 @@ def visit_BinOp(self, node):\n new_node = ast.Call(\n func=ast.Name(id=sympy_class, ctx=ast.Load()),\n args=[left, right],\n- keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],\n+ keywords=[ast.keyword(arg='evaluate', value=ast.NameConstant(value=False))],\n starargs=None,\n kwargs=None\n )\ndiff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex a3ead157c1..f3b7669e96 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n" + "run_name": "20240617_factory_code_droid", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\", \"acsc\", \"asec\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n" + "run_name": "20240402_sweagent_gpt4", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git 
a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex a3ead157c1..f3b7669e96 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n" + "run_name": "20240912_marscode-agent-dev", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex a3ead157c1..e1e60c1d1e 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\", \"acsc\", \"asec\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n" + "run_name": "20240617_moatless_gpt4o", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex a3ead157c1..f3b7669e96 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n" + "run_name": "20240811_gru", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex a3ead157c1..e1e60c1d1e 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\", \"acsc\", \"asec\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n" + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex a3ead157c1..f3b7669e96 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n" + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "requests/sessions.py": [ + "Session.request" + ] + } } - ], - "alternative_spans": [] + ] }, { - 
"instance_id": "scikit-learn__scikit-learn-11281", - "repo": "scikit-learn/scikit-learn", - "base_commit": "4143356c3c51831300789e4fdf795d83716dbab6", - "problem_statement": "Should mixture models have a clusterer-compatible interface\nMixture models are currently a bit different. They are basically clusterers, except they are probabilistic, and are applied to inductive problems unlike many clusterers. But they are unlike clusterers in API:\r\n* they have an `n_components` parameter, with identical purpose to `n_clusters`\r\n* they do not store the `labels_` of the training data\r\n* they do not have a `fit_predict` method\r\n\r\nAnd they are almost entirely documented separately.\r\n\r\nShould we make the MMs more like clusterers?\n", - "golden_patch": "diff --git a/sklearn/mixture/base.py b/sklearn/mixture/base.py\n--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -172,7 +172,7 @@ def _initialize(self, X, resp):\n def fit(self, X, y=None):\n \"\"\"Estimate model parameters with the EM algorithm.\n \n- The method fit the model `n_init` times and set the parameters with\n+ The method fits the model `n_init` times and set the parameters with\n which the model has the largest likelihood or lower bound. Within each\n trial, the method iterates between E-step and M-step for `max_iter`\n times until the change of likelihood or lower bound is less than\n@@ -188,6 +188,32 @@ def fit(self, X, y=None):\n -------\n self\n \"\"\"\n+ self.fit_predict(X, y)\n+ return self\n+\n+ def fit_predict(self, X, y=None):\n+ \"\"\"Estimate model parameters using X and predict the labels for X.\n+\n+ The method fits the model n_init times and sets the parameters with\n+ which the model has the largest likelihood or lower bound. Within each\n+ trial, the method iterates between E-step and M-step for `max_iter`\n+ times until the change of likelihood or lower bound is less than\n+ `tol`, otherwise, a `ConvergenceWarning` is raised. After fitting, it\n+ predicts the most probable label for the input data points.\n+\n+ .. versionadded:: 0.20\n+\n+ Parameters\n+ ----------\n+ X : array-like, shape (n_samples, n_features)\n+ List of n_features-dimensional data points. Each row\n+ corresponds to a single data point.\n+\n+ Returns\n+ -------\n+ labels : array, shape (n_samples,)\n+ Component labels.\n+ \"\"\"\n X = _check_X(X, self.n_components, ensure_min_samples=2)\n self._check_initial_parameters(X)\n \n@@ -240,7 +266,7 @@ def fit(self, X, y=None):\n self._set_parameters(best_params)\n self.n_iter_ = best_n_iter\n \n- return self\n+ return log_resp.argmax(axis=1)\n \n def _e_step(self, X):\n \"\"\"E step.\n", + "instance_id": "psf__requests-2674", + "repo": "psf/requests", + "base_commit": "0be38a0c37c59c4b66ce908731da15b401655113", + "problem_statement": "urllib3 exceptions passing through requests API\nI don't know if it's a design goal of requests to hide urllib3's exceptions and wrap them around requests.exceptions types.\n\n(If it's not IMHO it should be, but that's another discussion)\n\nIf it is, I have at least two of them passing through that I have to catch in addition to requests' exceptions. 
They are requests.packages.urllib3.exceptions.DecodeError and requests.packages.urllib3.exceptions.TimeoutError (this one I get when a proxy timeouts)\n\nThanks!\n\n", + "golden_patch": "diff --git a/requests/adapters.py b/requests/adapters.py\n--- a/requests/adapters.py\n+++ b/requests/adapters.py\n@@ -19,6 +19,7 @@\n from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,\n prepend_scheme_if_needed, get_auth_from_url, urldefragauth)\n from .structures import CaseInsensitiveDict\n+from .packages.urllib3.exceptions import ClosedPoolError\n from .packages.urllib3.exceptions import ConnectTimeoutError\n from .packages.urllib3.exceptions import HTTPError as _HTTPError\n from .packages.urllib3.exceptions import MaxRetryError\n@@ -421,6 +422,9 @@ def send(self, request, stream=False, timeout=None, verify=True, cert=None, prox\n \n raise ConnectionError(e, request=request)\n \n+ except ClosedPoolError as e:\n+ raise ConnectionError(e, request=request)\n+\n except _ProxyError as e:\n raise ProxyError(e)\n \n", + "test_patch": "diff --git a/test_requests.py b/test_requests.py\n--- a/test_requests.py\n+++ b/test_requests.py\n@@ -1655,6 +1655,16 @@ def test_urllib3_retries():\n with pytest.raises(RetryError):\n s.get(httpbin('status/500'))\n \n+\n+def test_urllib3_pool_connection_closed():\n+ s = requests.Session()\n+ s.mount('http://', HTTPAdapter(pool_connections=0, pool_maxsize=0))\n+\n+ try:\n+ s.get(httpbin('status/200'))\n+ except ConnectionError as e:\n+ assert u\"HTTPConnectionPool(host='httpbin.org', port=80): Pool is closed.\" in str(e.message)\n+\n def test_vendor_aliases():\n from requests.packages import urllib3\n from requests.packages import chardet\n", + "fail_to_pass": "[\"test_requests.py::RequestsTestCase::test_BASICAUTH_TUPLE_HTTP_200_OK_GET\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_ALTERNATIVE\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_WITH_PARAMS\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_HEAD\", \"test_requests.py::RequestsTestCase::test_auth_is_retained_for_redirect_on_host\", \"test_requests.py::RequestsTestCase::test_different_encodings_dont_break_post\", \"test_requests.py::RequestsTestCase::test_manual_redirect_with_partial_body_read\", \"test_requests.py::RequestsTestCase::test_mixed_case_scheme_acceptable\", \"test_requests.py::RequestsTestCase::test_prepared_from_session\", \"test_requests.py::RequestsTestCase::test_unicode_multipart_post\", \"test_requests.py::TestTimeout::test_none_timeout\", \"test_requests.py::TestTimeout::test_encoded_methods\"]", + "pass_to_pass": "[\"test_requests.py::RequestsTestCase::test_DIGESTAUTH_QUOTES_QOP_VALUE\", \"test_requests.py::RequestsTestCase::test_DIGESTAUTH_WRONG_HTTP_401_GET\", \"test_requests.py::RequestsTestCase::test_DIGEST_AUTH_RETURNS_COOKIE\", \"test_requests.py::RequestsTestCase::test_DIGEST_AUTH_SETS_SESSION_COOKIES\", \"test_requests.py::RequestsTestCase::test_DIGEST_STREAM\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_GET_WITH_MIXED_PARAMS\", \"test_requests.py::RequestsTestCase::test_HTTP_200_OK_PUT\", \"test_requests.py::RequestsTestCase::test_LocationParseError\", \"test_requests.py::RequestsTestCase::test_POSTBIN_GET_POST_FILES\", \"test_requests.py::RequestsTestCase::test_POSTBIN_GET_POST_FILES_WITH_DATA\", \"test_requests.py::RequestsTestCase::test_auth_is_stripped_on_redirect_off_host\", \"test_requests.py::RequestsTestCase::test_autoset_header_values_are_native\", 
\"test_requests.py::RequestsTestCase::test_basic_auth_str_is_always_native\", \"test_requests.py::RequestsTestCase::test_basic_building\", \"test_requests.py::RequestsTestCase::test_basicauth_with_netrc\", \"test_requests.py::RequestsTestCase::test_can_send_bytes_bytearray_objects_with_files\", \"test_requests.py::RequestsTestCase::test_can_send_file_object_with_non_string_filename\", \"test_requests.py::RequestsTestCase::test_can_send_nonstring_objects_with_files\", \"test_requests.py::RequestsTestCase::test_cannot_send_unprepared_requests\", \"test_requests.py::RequestsTestCase::test_connection_error_invalid_domain\", \"test_requests.py::RequestsTestCase::test_connection_error_invalid_port\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_items\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keeps_items\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keeps_len\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_keys\", \"test_requests.py::RequestsTestCase::test_cookie_as_dict_values\", \"test_requests.py::RequestsTestCase::test_cookie_parameters\", \"test_requests.py::RequestsTestCase::test_cookie_persists_via_api\", \"test_requests.py::RequestsTestCase::test_cookie_quote_wrapped\", \"test_requests.py::RequestsTestCase::test_cookie_removed_on_expire\", \"test_requests.py::RequestsTestCase::test_custom_content_type\", \"test_requests.py::RequestsTestCase::test_decompress_gzip\", \"test_requests.py::RequestsTestCase::test_entry_points\", \"test_requests.py::RequestsTestCase::test_fixes_1329\", \"test_requests.py::RequestsTestCase::test_generic_cookiejar_works\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_encoded_hashes\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_encoded_spaces\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_not_encoded_spaces\", \"test_requests.py::RequestsTestCase::test_get_auth_from_url_percent_chars\", \"test_requests.py::RequestsTestCase::test_header_keys_are_native\", \"test_requests.py::RequestsTestCase::test_header_remove_is_case_insensitive\", \"test_requests.py::RequestsTestCase::test_headers_on_session_with_None_are_not_sent\", \"test_requests.py::RequestsTestCase::test_history_is_always_a_list\", \"test_requests.py::RequestsTestCase::test_hook_receives_request_arguments\", \"test_requests.py::RequestsTestCase::test_http_error\", \"test_requests.py::RequestsTestCase::test_invalid_url\", \"test_requests.py::RequestsTestCase::test_json_param_post_content_type_works\", \"test_requests.py::RequestsTestCase::test_links\", \"test_requests.py::RequestsTestCase::test_long_authinfo_in_url\", \"test_requests.py::RequestsTestCase::test_no_content_length\", \"test_requests.py::RequestsTestCase::test_nonhttp_schemes_dont_check_URLs\", \"test_requests.py::RequestsTestCase::test_override_content_length\", \"test_requests.py::RequestsTestCase::test_params_are_added_before_fragment\", \"test_requests.py::RequestsTestCase::test_params_are_merged_case_sensitive\", \"test_requests.py::RequestsTestCase::test_path_is_not_double_encoded\", \"test_requests.py::RequestsTestCase::test_prepare_request_with_bytestring_url\", \"test_requests.py::RequestsTestCase::test_prepared_request_hook\", \"test_requests.py::RequestsTestCase::test_pyopenssl_redirect\", \"test_requests.py::RequestsTestCase::test_redirect_with_wrong_gzipped_header\", \"test_requests.py::RequestsTestCase::test_request_and_response_are_pickleable\", 
\"test_requests.py::RequestsTestCase::test_request_cookie_overrides_session_cookie\", \"test_requests.py::RequestsTestCase::test_request_cookies_not_persisted\", \"test_requests.py::RequestsTestCase::test_request_ok_set\", \"test_requests.py::RequestsTestCase::test_requests_in_history_are_not_overridden\", \"test_requests.py::RequestsTestCase::test_response_decode_unicode\", \"test_requests.py::RequestsTestCase::test_response_is_iterable\", \"test_requests.py::RequestsTestCase::test_response_iter_lines\", \"test_requests.py::RequestsTestCase::test_session_hooks_are_overriden_by_request_hooks\", \"test_requests.py::RequestsTestCase::test_session_hooks_are_used_with_no_request_hooks\", \"test_requests.py::RequestsTestCase::test_session_pickling\", \"test_requests.py::RequestsTestCase::test_set_cookie_on_301\", \"test_requests.py::RequestsTestCase::test_status_raising\", \"test_requests.py::RequestsTestCase::test_time_elapsed_blank\", \"test_requests.py::RequestsTestCase::test_transport_adapter_ordering\", \"test_requests.py::RequestsTestCase::test_unconsumed_session_response_closes_connection\", \"test_requests.py::RequestsTestCase::test_unicode_get\", \"test_requests.py::RequestsTestCase::test_unicode_header_name\", \"test_requests.py::RequestsTestCase::test_unicode_method_name\", \"test_requests.py::RequestsTestCase::test_unicode_multipart_post_fieldnames\", \"test_requests.py::RequestsTestCase::test_uppercase_scheme_redirect\", \"test_requests.py::RequestsTestCase::test_urlencoded_get_query_multivalued_param\", \"test_requests.py::RequestsTestCase::test_user_agent_transfers\", \"test_requests.py::TestContentEncodingDetection::test_html4_pragma\", \"test_requests.py::TestContentEncodingDetection::test_html_charset\", \"test_requests.py::TestContentEncodingDetection::test_none\", \"test_requests.py::TestContentEncodingDetection::test_precedence\", \"test_requests.py::TestContentEncodingDetection::test_xhtml_pragma\", \"test_requests.py::TestContentEncodingDetection::test_xml\", \"test_requests.py::TestCaseInsensitiveDict::test_contains\", \"test_requests.py::TestCaseInsensitiveDict::test_copy\", \"test_requests.py::TestCaseInsensitiveDict::test_delitem\", \"test_requests.py::TestCaseInsensitiveDict::test_docstring_example\", \"test_requests.py::TestCaseInsensitiveDict::test_equality\", \"test_requests.py::TestCaseInsensitiveDict::test_fixes_649\", \"test_requests.py::TestCaseInsensitiveDict::test_get\", \"test_requests.py::TestCaseInsensitiveDict::test_getitem\", \"test_requests.py::TestCaseInsensitiveDict::test_iter\", \"test_requests.py::TestCaseInsensitiveDict::test_iterable_init\", \"test_requests.py::TestCaseInsensitiveDict::test_kwargs_init\", \"test_requests.py::TestCaseInsensitiveDict::test_len\", \"test_requests.py::TestCaseInsensitiveDict::test_lower_items\", \"test_requests.py::TestCaseInsensitiveDict::test_mapping_init\", \"test_requests.py::TestCaseInsensitiveDict::test_preserve_key_case\", \"test_requests.py::TestCaseInsensitiveDict::test_preserve_last_key_case\", \"test_requests.py::TestCaseInsensitiveDict::test_repr\", \"test_requests.py::TestCaseInsensitiveDict::test_setdefault\", \"test_requests.py::TestCaseInsensitiveDict::test_update\", \"test_requests.py::TestCaseInsensitiveDict::test_update_retains_unchanged\", \"test_requests.py::UtilsTestCase::test_address_in_network\", \"test_requests.py::UtilsTestCase::test_dotted_netmask\", \"test_requests.py::UtilsTestCase::test_get_auth_from_url\", \"test_requests.py::UtilsTestCase::test_get_environ_proxies\", 
\"test_requests.py::UtilsTestCase::test_get_environ_proxies_ip_ranges\", \"test_requests.py::UtilsTestCase::test_guess_filename_when_filename_is_an_int\", \"test_requests.py::UtilsTestCase::test_guess_filename_when_int\", \"test_requests.py::UtilsTestCase::test_guess_filename_with_file_like_obj\", \"test_requests.py::UtilsTestCase::test_guess_filename_with_unicode_name\", \"test_requests.py::UtilsTestCase::test_is_ipv4_address\", \"test_requests.py::UtilsTestCase::test_is_valid_cidr\", \"test_requests.py::UtilsTestCase::test_requote_uri_properly_requotes\", \"test_requests.py::UtilsTestCase::test_requote_uri_with_unquoted_percents\", \"test_requests.py::UtilsTestCase::test_super_len_io_streams\", \"test_requests.py::TestMorselToCookieExpires::test_expires_invalid_int\", \"test_requests.py::TestMorselToCookieExpires::test_expires_invalid_str\", \"test_requests.py::TestMorselToCookieExpires::test_expires_none\", \"test_requests.py::TestMorselToCookieExpires::test_expires_valid_str\", \"test_requests.py::TestMorselToCookieMaxAge::test_max_age_invalid_str\", \"test_requests.py::TestMorselToCookieMaxAge::test_max_age_valid_int\", \"test_requests.py::TestTimeout::test_stream_timeout\", \"test_requests.py::TestTimeout::test_invalid_timeout\", \"test_requests.py::TestTimeout::test_read_timeout\", \"test_requests.py::TestTimeout::test_connect_timeout\", \"test_requests.py::TestTimeout::test_total_timeout_connect\", \"test_requests.py::TestRedirects::test_requests_are_updated_each_time\", \"test_requests.py::test_data_argument_accepts_tuples\", \"test_requests.py::test_prepared_request_empty_copy\", \"test_requests.py::test_prepared_request_no_cookies_copy\", \"test_requests.py::test_prepared_request_complete_copy\", \"test_requests.py::test_prepare_unicode_url\", \"test_requests.py::test_urllib3_retries\", \"test_requests.py::test_vendor_aliases\"]", "expected_spans": { - "sklearn/mixture/base.py": [ - "BaseMixture.fit" + "requests/adapters.py": [ + "imports", + "HTTPAdapter.send" + ] + }, + "test_file_spans": { + "test_requests.py": [ + "test_vendor_aliases" ] }, "resolved_by": [ - { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/sklearn/mixture/base.py b/sklearn/mixture/base.py\nindex a9f66740f..4da75d191 100644\n--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -84,7 +84,7 @@ class BaseMixture(six.with_metaclass(ABCMeta, DensityMixin, BaseEstimator)):\n self.warm_start = warm_start\n self.verbose = verbose\n self.verbose_interval = verbose_interval\n-\n+ self.labels_ = None\n def _check_initial_parameters(self, X):\n \"\"\"Check values of the basic parameters.\n \n@@ -238,7 +238,7 @@ class BaseMixture(six.with_metaclass(ABCMeta, DensityMixin, BaseEstimator)):\n % (init + 1), ConvergenceWarning)\n \n self._set_parameters(best_params)\n- self.n_iter_ = best_n_iter\n+ self.labels_ = self.predict(X)\n \n return self\n \n@@ -501,3 +501,25 @@ class BaseMixture(six.with_metaclass(ABCMeta, DensityMixin, BaseEstimator)):\n elif self.verbose >= 2:\n print(\"Initialization converged: %s\\t time lapse %.5fs\\t ll %.5f\" %\n (self.converged_, time() - self._init_prev_time, ll))\n+ def fit_predict(self, X, y=None):\n+ \"\"\"Estimate model parameters using X and predict the labels for X.\n+\n+ The method fits the model n_init times and sets the parameters with\n+ which the model has the largest likelihood or lower bound. 
Within each\n+ trial, the method iterates between E-step and M-step for `max_iter`\n+ times until the change of likelihood or lower bound is less than\n+ `tol`, otherwise, a `ConvergenceWarning` is raised.\n+\n+ Parameters\n+ ----------\n+ X : array-like, shape (n_samples, n_features)\n+ List of n_features-dimensional data points. Each row\n+ corresponds to a single data point.\n+\n+ Returns\n+ -------\n+ labels : array, shape (n_samples,)\n+ Component labels.\n+ \"\"\"\n+ self.fit(X)\n+ return self.labels_\n" - }, { "name": "20240523_aider", - "patch": "diff --git a/sklearn/mixture/base.py b/sklearn/mixture/base.py\nindex a9f66740f..fac536158 100644\n--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -74,6 +74,7 @@ class BaseMixture(six.with_metaclass(ABCMeta, DensityMixin, BaseEstimator)):\n def __init__(self, n_components, tol, reg_covar,\n max_iter, n_init, init_params, random_state, warm_start,\n verbose, verbose_interval):\n+ self.labels_ = None\n self.n_components = n_components\n self.tol = tol\n self.reg_covar = reg_covar\n@@ -242,6 +243,21 @@ class BaseMixture(six.with_metaclass(ABCMeta, DensityMixin, BaseEstimator)):\n \n return self\n \n+ def fit_predict(self, X, y=None):\n+ \"\"\"Fit and then predict labels for data.\n+\n+ Parameters\n+ ----------\n+ X : array-like, shape = [n_samples, n_features]\n+\n+ Returns\n+ -------\n+ labels : array, shape = (n_samples,) component memberships\n+ \"\"\"\n+ self.fit(X, y)\n+ self.labels_ = self.predict(X)\n+ return self.labels_\n+\n def _e_step(self, X):\n \"\"\"E step.\n \ndiff --git a/sklearn/mixture/gmm.py b/sklearn/mixture/gmm.py\nindex b3c231314..037330445 100644\n--- a/sklearn/mixture/gmm.py\n+++ b/sklearn/mixture/gmm.py\n@@ -260,6 +260,7 @@ class _GMMBase(BaseEstimator):\n random_state=None, tol=1e-3, min_covar=1e-3,\n n_iter=100, n_init=1, params='wmc', init_params='wmc',\n verbose=0):\n+ self.labels_ = None\n self.n_components = n_components\n self.covariance_type = covariance_type\n self.tol = tol\n@@ -372,7 +373,9 @@ class _GMMBase(BaseEstimator):\n C : array, shape = (n_samples,) component memberships\n \"\"\"\n logprob, responsibilities = self.score_samples(X)\n- return responsibilities.argmax(axis=1)\n+ labels = responsibilities.argmax(axis=1)\n+ self.labels_ = labels\n+ return labels\n \n def predict_proba(self, X):\n \"\"\"Predict posterior probability of data under each Gaussian\n@@ -450,7 +453,8 @@ class _GMMBase(BaseEstimator):\n -------\n C : array, shape = (n_samples,) component memberships\n \"\"\"\n- return self._fit(X, y).argmax(axis=1)\n+ self.labels_ = self._fit(X, y).argmax(axis=1)\n+ return self.labels_\n \n def _fit(self, X, y=None, do_prediction=False):\n \"\"\"Estimate model parameters with the EM algorithm.\n" + "updated_spans": { + "requests/exceptions.py": [ + "StreamConsumedError" + ] + }, + "alternative_spans": {} }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sklearn/mixture/base.py b/sklearn/mixture/base.py\nindex a9f66740f..e558c18af 100644\n--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -240,8 +240,34 @@ class BaseMixture(six.with_metaclass(ABCMeta, DensityMixin, BaseEstimator)):\n self._set_parameters(best_params)\n self.n_iter_ = best_n_iter\n \n+ # Calculate labels based on the responsibilities\n+ _, resp = self._e_step(X)\n+ self.labels_ = np.argmax(resp, axis=1)\n return self\n \n+ def fit_predict(self, X, y=None):\n+ \"\"\"Estimate model parameters using X and predict the labels for X.\n+\n+ The method fits the model n_init times and sets the 
parameters with\n+ which the model has the largest likelihood or lower bound. Within each\n+ trial, the method iterates between E-step and M-step for max_iter\n+ times until the change of likelihood or lower bound is less than\n+ tol, otherwise, a ConvergenceWarning is raised. After fitting, it\n+ predicts the labels for X.\n+\n+ Parameters\n+ ----------\n+ X : array-like, shape (n_samples, n_features)\n+ List of n_features-dimensional data points. Each row\n+ corresponds to a single data point.\n+\n+ Returns\n+ -------\n+ labels : array, shape (n_samples,)\n+ Component labels.\n+ \"\"\"\n+ return self.fit(X, y).labels_\n+\n def _e_step(self, X):\n \"\"\"E step.\n \n" + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.proxy_headers", + "HTTPAdapter.send", + "HTTPAdapter" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.proxy_headers", + "HTTPAdapter.send", + "HTTPAdapter" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sklearn/mixture/base.py b/sklearn/mixture/base.py\nindex a9f66740f..a04e2f0fc 100644\n--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -84,6 +84,38 @@ class BaseMixture(six.with_metaclass(ABCMeta, DensityMixin, BaseEstimator)):\n self.warm_start = warm_start\n self.verbose = verbose\n self.verbose_interval = verbose_interval\n+ self.labels_ = None\n+\n+ def fit_predict(self, X, y=None):\n+ \"\"\"Fit the model to the data and then return the labels for each sample.\n+\n+ Parameters\n+ ----------\n+ X : array-like, shape (n_samples, n_features)\n+\n+ Returns\n+ -------\n+ labels : array, shape (n_samples,)\n+ Component memberships for each sample.\n+ \"\"\"\n+ self.fit(X, y)\n+ self.labels_ = self.predict(X)\n+ return self.labels_\n+\n+ def fit_predict(self, X, y=None):\n+ \"\"\"Fit the model to the data and then return the labels for each sample.\n+\n+ Parameters\n+ ----------\n+ X : array-like, shape (n_samples, n_features)\n+\n+ Returns\n+ -------\n+ labels : array, shape (n_samples,)\n+ Component memberships for each sample.\n+ \"\"\"\n+ self.fit(X, y)\n+ return self.predict(X)\n \n def _check_initial_parameters(self, X):\n \"\"\"Check values of the basic parameters.\n" - } - ], - "alternative_spans": [ - { - "run_name": "20240523_aider", - "spans": { - "sklearn/mixture/base.py": [ - "BaseMixture", - "BaseMixture._e_step" + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "requests/adapters.py": [ + "HTTPAdapter", + "HTTPAdapter.send" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "HTTPAdapter", + "HTTPAdapter.send" ] } - } - ] - }, - { - "instance_id": "sympy__sympy-14817", - "repo": "sympy/sympy", - "base_commit": "0dbdc0ea83d339936da175f8c3a97d0d6bafb9f8", - "problem_statement": "Error pretty printing MatAdd\n```py\r\n>>> pprint(MatrixSymbol('x', n, n) + MatrixSymbol('y*', n, n))\r\nTraceback (most recent call last):\r\n File \"./sympy/core/sympify.py\", line 368, in sympify\r\n expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)\r\n File \"./sympy/parsing/sympy_parser.py\", line 950, in parse_expr\r\n return eval_expr(code, local_dict, global_dict)\r\n File \"./sympy/parsing/sympy_parser.py\", line 863, in eval_expr\r\n code, global_dict, local_dict) # take local objects in preference\r\n File \"\", line 1\r\n Symbol ('y' )*\r\n ^\r\nSyntaxError: unexpected EOF while parsing\r\n\r\nDuring handling of the above exception, another exception 
occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"./sympy/printing/pretty/pretty.py\", line 2371, in pretty_print\r\n use_unicode_sqrt_char=use_unicode_sqrt_char))\r\n File \"./sympy/printing/pretty/pretty.py\", line 2331, in pretty\r\n return pp.doprint(expr)\r\n File \"./sympy/printing/pretty/pretty.py\", line 62, in doprint\r\n return self._print(expr).render(**self._settings)\r\n File \"./sympy/printing/printer.py\", line 274, in _print\r\n return getattr(self, printmethod)(expr, *args, **kwargs)\r\n File \"./sympy/printing/pretty/pretty.py\", line 828, in _print_MatAdd\r\n if S(item.args[0]).is_negative:\r\n File \"./sympy/core/sympify.py\", line 370, in sympify\r\n raise SympifyError('could not parse %r' % a, exc)\r\nsympy.core.sympify.SympifyError: Sympify of expression 'could not parse 'y*'' failed, because of exception being raised:\r\nSyntaxError: unexpected EOF while parsing (, line 1)\r\n```\r\n\r\nThe code shouldn't be using sympify to handle string arguments from MatrixSymbol.\r\n\r\nI don't even understand what the code is doing. Why does it omit the `+` when the first argument is negative? This seems to assume that the arguments of MatAdd have a certain form, and that they will always print a certain way if they are negative. \n", - "golden_patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -825,7 +825,8 @@ def _print_MatAdd(self, expr):\n if s is None:\n s = pform # First element\n else:\n- if S(item.args[0]).is_negative:\n+ coeff = item.as_coeff_mmul()[0]\n+ if _coeff_isneg(S(coeff)):\n s = prettyForm(*stringPict.next(s, ' '))\n pform = self._print(item)\n else:\n", - "expected_spans": { - "sympy/printing/pretty/pretty.py": [ - "PrettyPrinter._print_MatAdd" - ] - }, - "resolved_by": [ + }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -825,11 +825,9 @@\n if s is None:\n s = pform # First element\n else:\n- if S(item.args[0]).is_negative:\n- s = prettyForm(*stringPict.next(s, ' '))\n- pform = self._print(item)\n- else:\n- s = prettyForm(*stringPict.next(s, ' + '))\n+ delimiter = ' + ' if not str(item).startswith('-') else ' '\n+ s = prettyForm(*stringPict.next(s, delimiter))\n+ pform = self._print(item)\n s = prettyForm(*stringPict.next(s, pform))\n \n return s\n" + "name": "20240828_autose_mixed", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ], + "test_requests.py": [] + }, + "alternative_spans": {} }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\nindex 0545797d5f..836e9e6aa8 100644\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -825,10 +825,16 @@ def _print_MatAdd(self, expr):\n if s is None:\n s = pform # First element\n else:\n- if S(item.args[0]).is_negative:\n- s = prettyForm(*stringPict.next(s, ' '))\n- pform = self._print(item)\n- else:\n+ # Check if the first argument is negative without using sympify directly on it\n+ try:\n+ if item.args[0].is_negative:\n+ s = prettyForm(*stringPict.next(s, ' '))\n+ pform = self._print(item)\n+ else:\n+ s = prettyForm(*stringPict.next(s, ' + '))\n+ except AttributeError:\n+ # Fallback if item.args[0] does not have is_negative attribute\n+ # This handles cases where sympify would fail\n s = prettyForm(*stringPict.next(s, ' + 
'))\n s = prettyForm(*stringPict.next(s, pform))\n \n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-15011", - "repo": "sympy/sympy", - "base_commit": "b7c5ba2bf3ffd5cf453b25af7c8ddd9a639800cb", - "problem_statement": "lambdify does not work with certain MatrixSymbol names even with dummify=True\n`lambdify` is happy with curly braces in a symbol name and with `MatrixSymbol`s, but not with both at the same time, even if `dummify` is `True`.\r\n\r\nHere is some basic code that gives the error.\r\n```\r\nimport sympy as sy\r\ncurlyx = sy.symbols(\"{x}\")\r\nv = sy.MatrixSymbol(\"v\", 2, 1)\r\ncurlyv = sy.MatrixSymbol(\"{v}\", 2, 1)\r\n```\r\n\r\nThe following two lines of code work:\r\n```\r\ncurlyScalarId = sy.lambdify(curlyx, curlyx)\r\nvectorId = sy.lambdify(v,v)\r\n```\r\n\r\nThe following two lines of code give a `SyntaxError`:\r\n```\r\ncurlyVectorId = sy.lambdify(curlyv, curlyv)\r\ncurlyVectorIdDummified = sy.lambdify(curlyv, curlyv, dummify=True)\r\n```\r\n\r\n\n", - "golden_patch": "diff --git a/sympy/utilities/lambdify.py b/sympy/utilities/lambdify.py\n--- a/sympy/utilities/lambdify.py\n+++ b/sympy/utilities/lambdify.py\n@@ -700,14 +700,13 @@ def _is_safe_ident(cls, ident):\n return isinstance(ident, str) and cls._safe_ident_re.match(ident) \\\n and not (keyword.iskeyword(ident) or ident == 'None')\n \n-\n def _preprocess(self, args, expr):\n \"\"\"Preprocess args, expr to replace arguments that do not map\n to valid Python identifiers.\n \n Returns string form of args, and updated expr.\n \"\"\"\n- from sympy import Dummy, Symbol, Function, flatten\n+ from sympy import Dummy, Symbol, MatrixSymbol, Function, flatten\n from sympy.matrices import DeferredVector\n \n dummify = self._dummify\n@@ -725,7 +724,7 @@ def _preprocess(self, args, expr):\n argstrs.append(nested_argstrs)\n elif isinstance(arg, DeferredVector):\n argstrs.append(str(arg))\n- elif isinstance(arg, Symbol):\n+ elif isinstance(arg, Symbol) or isinstance(arg, MatrixSymbol):\n argrep = self._argrepr(arg)\n \n if dummify or not self._is_safe_ident(argrep):\n@@ -739,7 +738,14 @@ def _preprocess(self, args, expr):\n argstrs.append(self._argrepr(dummy))\n expr = self._subexpr(expr, {arg: dummy})\n else:\n- argstrs.append(str(arg))\n+ argrep = self._argrepr(arg)\n+\n+ if dummify:\n+ dummy = Dummy()\n+ argstrs.append(self._argrepr(dummy))\n+ expr = self._subexpr(expr, {arg: dummy})\n+ else:\n+ argstrs.append(str(arg))\n \n return argstrs, expr\n \n", - "expected_spans": { - "sympy/utilities/lambdify.py": [ - "_EvaluatorPrinter._preprocess" - ] - }, - "resolved_by": [ + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sympy/utilities/lambdify.py\n+++ b/sympy/utilities/lambdify.py\n@@ -718,6 +718,11 @@\n if not dummify:\n dummify = any(isinstance(arg, Dummy) for arg in flatten(args))\n \n+ from sympy import MatrixSymbol\n+ def clean_symbol_name(name):\n+ # Replace curly braces and other invalid characters with underscores\n+ return re.sub(r'[\\\\{}]+', '_', name)\n+\n argstrs = []\n for arg in args:\n if iterable(arg):\n@@ -725,7 +730,7 @@\n argstrs.append(nested_argstrs)\n elif isinstance(arg, DeferredVector):\n argstrs.append(str(arg))\n- elif isinstance(arg, Symbol):\n+ elif isinstance(arg, (Symbol, MatrixSymbol)):\n argrep = self._argrepr(arg)\n \n if dummify or not self._is_safe_ident(argrep):\n@@ -733,6 +738,9 @@\n argstrs.append(self._argrepr(dummy))\n expr = self._subexpr(expr, {arg: 
dummy})\n else:\n+ # For MatrixSymbol with invalid names, ensure valid Python identifier\n+ if isinstance(arg, MatrixSymbol):\n+ argrep = clean_symbol_name(argrep)\n argstrs.append(argrep)\n elif isinstance(arg, Function):\n dummy = Dummy()\n" + "name": "20240706_sima_gpt4o", + "updated_spans": { + "requests/exceptions.py": [ + "RequestException", + "Timeout", + "ContentDecodingError" + ] + }, + "alternative_spans": {} }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sympy/utilities/lambdify.py b/sympy/utilities/lambdify.py\nindex f71bea25f5..8c8f091373 100644\n--- a/sympy/utilities/lambdify.py\n+++ b/sympy/utilities/lambdify.py\n@@ -707,7 +707,7 @@ def _preprocess(self, args, expr):\n \n Returns string form of args, and updated expr.\n \"\"\"\n- from sympy import Dummy, Symbol, Function, flatten\n+ from sympy import Dummy, Symbol, Function, flatten, MatrixSymbol\n from sympy.matrices import DeferredVector\n \n dummify = self._dummify\n@@ -725,7 +725,7 @@ def _preprocess(self, args, expr):\n argstrs.append(nested_argstrs)\n elif isinstance(arg, DeferredVector):\n argstrs.append(str(arg))\n- elif isinstance(arg, Symbol):\n+ elif isinstance(arg, (Symbol, MatrixSymbol)):\n argrep = self._argrepr(arg)\n \n if dummify or not self._is_safe_ident(argrep):\n@@ -743,6 +743,8 @@ def _preprocess(self, args, expr):\n \n return argstrs, expr\n \n+ return argstrs, expr\n+\n def _subexpr(self, expr, dummies_dict):\n from sympy.matrices import DeferredVector\n from sympy import sympify\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "astropy__astropy-7746", - "repo": "astropy/astropy", - "base_commit": "d5bd3f68bb6d5ce3a61bdce9883ee750d1afade5", - "problem_statement": "Issue when passing empty lists/arrays to WCS transformations\nThe following should not fail but instead should return empty lists/arrays:\r\n\r\n```\r\nIn [1]: from astropy.wcs import WCS\r\n\r\nIn [2]: wcs = WCS('2MASS_h.fits')\r\n\r\nIn [3]: wcs.wcs_pix2world([], [], 0)\r\n---------------------------------------------------------------------------\r\nInconsistentAxisTypesError Traceback (most recent call last)\r\n in ()\r\n----> 1 wcs.wcs_pix2world([], [], 0)\r\n\r\n~/Dropbox/Code/Astropy/astropy/astropy/wcs/wcs.py in wcs_pix2world(self, *args, **kwargs)\r\n 1352 return self._array_converter(\r\n 1353 lambda xy, o: self.wcs.p2s(xy, o)['world'],\r\n-> 1354 'output', *args, **kwargs)\r\n 1355 wcs_pix2world.__doc__ = \"\"\"\r\n 1356 Transforms pixel coordinates to world coordinates by doing\r\n\r\n~/Dropbox/Code/Astropy/astropy/astropy/wcs/wcs.py in _array_converter(self, func, sky, ra_dec_order, *args)\r\n 1267 \"a 1-D array for each axis, followed by an origin.\")\r\n 1268 \r\n-> 1269 return _return_list_of_arrays(axes, origin)\r\n 1270 \r\n 1271 raise TypeError(\r\n\r\n~/Dropbox/Code/Astropy/astropy/astropy/wcs/wcs.py in _return_list_of_arrays(axes, origin)\r\n 1223 if ra_dec_order and sky == 'input':\r\n 1224 xy = self._denormalize_sky(xy)\r\n-> 1225 output = func(xy, origin)\r\n 1226 if ra_dec_order and sky == 'output':\r\n 1227 output = self._normalize_sky(output)\r\n\r\n~/Dropbox/Code/Astropy/astropy/astropy/wcs/wcs.py in (xy, o)\r\n 1351 raise ValueError(\"No basic WCS settings were created.\")\r\n 1352 return self._array_converter(\r\n-> 1353 lambda xy, o: self.wcs.p2s(xy, o)['world'],\r\n 1354 'output', *args, **kwargs)\r\n 1355 wcs_pix2world.__doc__ = \"\"\"\r\n\r\nInconsistentAxisTypesError: ERROR 4 in wcsp2s() at line 2646 of file cextern/wcslib/C/wcs.c:\r\nncoord and/or nelem 
inconsistent with the wcsprm.\r\n```\n", - "golden_patch": "diff --git a/astropy/wcs/wcs.py b/astropy/wcs/wcs.py\n--- a/astropy/wcs/wcs.py\n+++ b/astropy/wcs/wcs.py\n@@ -1212,6 +1212,9 @@ def _array_converter(self, func, sky, *args, ra_dec_order=False):\n \"\"\"\n \n def _return_list_of_arrays(axes, origin):\n+ if any([x.size == 0 for x in axes]):\n+ return axes\n+\n try:\n axes = np.broadcast_arrays(*axes)\n except ValueError:\n@@ -1235,6 +1238,8 @@ def _return_single_array(xy, origin):\n raise ValueError(\n \"When providing two arguments, the array must be \"\n \"of shape (N, {0})\".format(self.naxis))\n+ if 0 in xy.shape:\n+ return xy\n if ra_dec_order and sky == 'input':\n xy = self._denormalize_sky(xy)\n result = func(xy, origin)\n", - "expected_spans": { - "astropy/wcs/wcs.py": [ - "WCS._array_converter" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-15308", - "repo": "sympy/sympy", - "base_commit": "fb59d703e6863ed803c98177b59197b5513332e9", - "problem_statement": "LaTeX printing for Matrix Expression\n```py\r\n>>> A = MatrixSymbol(\"A\", n, n)\r\n>>> latex(trace(A**2))\r\n'Trace(A**2)'\r\n```\r\n\r\nThe bad part is not only is Trace not recognized, but whatever printer is being used doesn't fallback to the LaTeX printer for the inner expression (it should be `A^2`). \n", - "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -289,6 +289,10 @@ def _do_exponent(self, expr, exp):\n else:\n return expr\n \n+ def _print_Basic(self, expr):\n+ l = [self._print(o) for o in expr.args]\n+ return self._deal_with_super_sub(expr.__class__.__name__) + r\"\\left(%s\\right)\" % \", \".join(l)\n+\n def _print_bool(self, e):\n return r\"\\mathrm{%s}\" % e\n \n@@ -1462,6 +1466,10 @@ def _print_Transpose(self, expr):\n else:\n return \"%s^T\" % self._print(mat)\n \n+ def _print_Trace(self, expr):\n+ mat = expr.arg\n+ return r\"\\mathrm{tr}\\left (%s \\right )\" % self._print(mat)\n+\n def _print_Adjoint(self, expr):\n mat = expr.arg\n from sympy.matrices import MatrixSymbol\n", - "expected_spans": { - "sympy/printing/latex.py": [ - "LatexPrinter._print_bool", - "LatexPrinter._print_Adjoint" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-15345", - "repo": "sympy/sympy", - "base_commit": "9ef28fba5b4d6d0168237c9c005a550e6dc27d81", - "problem_statement": "mathematica_code gives wrong output with Max\nIf I run the code\r\n\r\n```\r\nx = symbols('x')\r\nmathematica_code(Max(x,2))\r\n```\r\n\r\nthen I would expect the output `'Max[x,2]'` which is valid Mathematica code but instead I get `'Max(2, x)'` which is not valid Mathematica code.\n", - "golden_patch": "diff --git a/sympy/printing/mathematica.py b/sympy/printing/mathematica.py\n--- a/sympy/printing/mathematica.py\n+++ b/sympy/printing/mathematica.py\n@@ -31,7 +31,8 @@\n \"asech\": [(lambda x: True, \"ArcSech\")],\n \"acsch\": [(lambda x: True, \"ArcCsch\")],\n \"conjugate\": [(lambda x: True, \"Conjugate\")],\n-\n+ \"Max\": [(lambda *x: True, \"Max\")],\n+ \"Min\": [(lambda *x: True, \"Min\")],\n }\n \n \n@@ -101,6 +102,8 @@ def _print_Function(self, expr):\n return \"%s[%s]\" % (mfunc, self.stringify(expr.args, \", \"))\n return expr.func.__name__ + \"[%s]\" % self.stringify(expr.args, \", \")\n \n+ _print_MinMaxBase = _print_Function\n+\n def _print_Integral(self, expr):\n if len(expr.variables) == 1 and not expr.limits[0][1:]:\n args = [expr.args[0], 
expr.variables[0]]\n", - "expected_spans": { - "sympy/printing/mathematica.py": [ - "impl", - "MCodePrinter._print_Integral" - ] - }, - "resolved_by": [ - { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sympy/printing/mathematica.py\n+++ b/sympy/printing/mathematica.py\n@@ -90,12 +90,15 @@\n \n def _print_list(self, expr):\n return '{' + ', '.join(self.doprint(a) for a in expr) + '}'\n _print_tuple = _print_list\n _print_Tuple = _print_list\n \n+ def _print_MinMaxBase(self, expr):\n+ return expr.func.__name__ + \"[\" + ', '.join(self.doprint(a) for a in expr.args) + \"]\"\n+\n def _print_Function(self, expr):\n if expr.func.__name__ in self.known_functions:\n cond_mfunc = self.known_functions[expr.func.__name__]\n for cond, mfunc in cond_mfunc:\n if cond(*expr.args):\n return \"%s[%s]\" % (mfunc, self.stringify(expr.args, \", \"))\n" - } - ], - "alternative_spans": [ + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.build_response" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.build_response" + ] + } + }, { - "run_name": "20240509_amazon-q-developer-agent-20240430-dev", - "spans": { - "sympy/printing/mathematica.py": [ - "MCodePrinter:13", - "MCodePrinter._print_Function" + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "requests/adapters.py": [ + "docstring", + "imports", + "HTTPAdapter", + "HTTPAdapter.__init__", + "HTTPAdapter.__getstate__", + "HTTPAdapter.__setstate__", + "HTTPAdapter.init_poolmanager", + "HTTPAdapter.proxy_manager_for", + "HTTPAdapter.cert_verify", + "HTTPAdapter.build_response", + "HTTPAdapter.get_connection", + "HTTPAdapter.request_url", + "HTTPAdapter.proxy_headers", + "HTTPAdapter.send" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "docstring", + "imports", + "HTTPAdapter", + "HTTPAdapter.__init__", + "HTTPAdapter.__getstate__", + "HTTPAdapter.__setstate__", + "HTTPAdapter.init_poolmanager", + "HTTPAdapter.proxy_manager_for", + "HTTPAdapter.cert_verify", + "HTTPAdapter.build_response", + "HTTPAdapter.get_connection", + "HTTPAdapter.request_url", + "HTTPAdapter.proxy_headers", + "HTTPAdapter.send" ] } - } - ] - }, - { - "instance_id": "sympy__sympy-15346", - "repo": "sympy/sympy", - "base_commit": "9ef28fba5b4d6d0168237c9c005a550e6dc27d81", - "problem_statement": "can't simplify sin/cos with Rational?\nlatest cloned sympy, python 3 on windows\r\nfirstly, cos, sin with symbols can be simplified; rational number can be simplified\r\n```python\r\nfrom sympy import *\r\n\r\nx, y = symbols('x, y', real=True)\r\nr = sin(x)*sin(y) + cos(x)*cos(y)\r\nprint(r)\r\nprint(r.simplify())\r\nprint()\r\n\r\nr = Rational(1, 50) - Rational(1, 25)\r\nprint(r)\r\nprint(r.simplify())\r\nprint()\r\n```\r\nsays\r\n```cmd\r\nsin(x)*sin(y) + cos(x)*cos(y)\r\ncos(x - y)\r\n\r\n-1/50\r\n-1/50\r\n```\r\n\r\nbut\r\n```python\r\nt1 = Matrix([sin(Rational(1, 50)), cos(Rational(1, 50)), 0])\r\nt2 = Matrix([sin(Rational(1, 25)), cos(Rational(1, 25)), 0])\r\nr = t1.dot(t2)\r\nprint(r)\r\nprint(r.simplify())\r\nprint()\r\n\r\nr = sin(Rational(1, 50))*sin(Rational(1, 25)) + cos(Rational(1, 50))*cos(Rational(1, 25))\r\nprint(r)\r\nprint(r.simplify())\r\nprint()\r\n\r\nprint(acos(r))\r\nprint(acos(r).simplify())\r\nprint()\r\n```\r\nsays\r\n```cmd\r\nsin(1/50)*sin(1/25) + cos(1/50)*cos(1/25)\r\nsin(1/50)*sin(1/25) + cos(1/50)*cos(1/25)\r\n\r\nsin(1/50)*sin(1/25) + cos(1/50)*cos(1/25)\r\nsin(1/50)*sin(1/25) + 
cos(1/50)*cos(1/25)\r\n\r\nacos(sin(1/50)*sin(1/25) + cos(1/50)*cos(1/25))\r\nacos(sin(1/50)*sin(1/25) + cos(1/50)*cos(1/25))\r\n```\r\n\r\n\n", - "golden_patch": "diff --git a/sympy/simplify/trigsimp.py b/sympy/simplify/trigsimp.py\n--- a/sympy/simplify/trigsimp.py\n+++ b/sympy/simplify/trigsimp.py\n@@ -1143,8 +1143,8 @@ def _futrig(e, **kwargs):\n lambda x: _eapply(factor, x, trigs),\n TR14, # factored powers of identities\n [identity, lambda x: _eapply(_mexpand, x, trigs)],\n- TRmorrie,\n TR10i, # sin-cos products > sin-cos of sums\n+ TRmorrie,\n [identity, TR8], # sin-cos products -> sin-cos of sums\n [identity, lambda x: TR2i(TR2(x))], # tan -> sin-cos -> tan\n [\n", - "expected_spans": { - "sympy/simplify/trigsimp.py": [ - "_futrig" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-12471", - "repo": "scikit-learn/scikit-learn", - "base_commit": "02dc9ed680e7f53f1b0d410dcdd37341c7958eb1", - "problem_statement": "OneHotEncoder ignore unknown error when categories are strings \n#### Description\r\n\r\nThis bug is very specific, but it happens when you set OneHotEncoder to ignore unknown entries.\r\nand your labels are strings. The memory of the arrays is not handled safely and it can lead to a ValueError\r\n\r\nBasically, when you call the transform method it will sets all the unknown strings on your array to OneHotEncoder.categories_[i][0] which is the first category alphabetically sorted given for fit\r\nIf this OneHotEncoder.categories_[i][0] is a long string, and the array that you want to transform has small strings, then it is impossible to fit the whole OneHotEncoder.categories_[i][0] into the entries of the array we want to transform. So OneHotEncoder.categories_[i][0] is truncated and this raise the ValueError.\r\n\r\n\r\n\r\n#### Steps/Code to Reproduce\r\n```\r\n\r\nimport numpy as np\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n\r\n\r\n# It needs to be numpy arrays, the error does not appear \r\n# is you have lists of lists because it gets treated like an array of objects.\r\ntrain = np.array([ '22','333','4444','11111111' ]).reshape((-1,1))\r\ntest = np.array([ '55555', '22' ]).reshape((-1,1))\r\n\r\nohe = OneHotEncoder(dtype=bool,handle_unknown='ignore')\r\n\r\nohe.fit( train )\r\nenc_test = ohe.transform( test )\r\n\r\n```\r\n\r\n\r\n#### Expected Results\r\nHere we should get an sparse matrix 2x4 false everywhere except at (1,1) the '22' that is known\r\n\r\n#### Actual Results\r\n\r\n> ValueError: y contains previously unseen labels: ['111111']\r\n\r\n\r\n#### Versions\r\nSystem:\r\n python: 2.7.12 (default, Dec 4 2017, 14:50:18) [GCC 5.4.0 20160609]\r\n machine: Linux-4.4.0-138-generic-x86_64-with-Ubuntu-16.04-xenial\r\nexecutable: /usr/bin/python\r\n\r\nBLAS:\r\n macros: HAVE_CBLAS=None\r\ncblas_libs: openblas, openblas\r\n lib_dirs: /usr/lib\r\n\r\nPython deps:\r\n Cython: 0.25.2\r\n scipy: 0.18.1\r\nsetuptools: 36.7.0\r\n pip: 9.0.1\r\n numpy: 1.15.2\r\n pandas: 0.19.1\r\n sklearn: 0.21.dev0\r\n\r\n\r\n\r\n#### Comments\r\n\r\nI already implemented a fix for this issue, where I check the size of the elements in the array before, and I cast them into objects if necessary.\n", - "golden_patch": "diff --git a/sklearn/preprocessing/_encoders.py b/sklearn/preprocessing/_encoders.py\n--- a/sklearn/preprocessing/_encoders.py\n+++ b/sklearn/preprocessing/_encoders.py\n@@ -110,7 +110,14 @@ def _transform(self, X, handle_unknown='error'):\n # continue `The rows are marked `X_mask` and will be\n # removed 
later.\n X_mask[:, i] = valid_mask\n- Xi = Xi.copy()\n+ # cast Xi into the largest string type necessary\n+ # to handle different lengths of numpy strings\n+ if (self.categories_[i].dtype.kind in ('U', 'S')\n+ and self.categories_[i].itemsize > Xi.itemsize):\n+ Xi = Xi.astype(self.categories_[i].dtype)\n+ else:\n+ Xi = Xi.copy()\n+\n Xi[~valid_mask] = self.categories_[i][0]\n _, encoded = _encode(Xi, self.categories_[i], encode=True)\n X_int[:, i] = encoded\n", - "expected_spans": { - "sklearn/preprocessing/_encoders.py": [ - "_BaseEncoder._transform" - ] - }, - "resolved_by": [ + }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sklearn/preprocessing/_encoders.py b/sklearn/preprocessing/_encoders.py\nindex 1e05aba59..f34f021d2 100644\n--- a/sklearn/preprocessing/_encoders.py\n+++ b/sklearn/preprocessing/_encoders.py\n@@ -111,6 +111,11 @@ class _BaseEncoder(BaseEstimator, TransformerMixin):\n # removed later.\n X_mask[:, i] = valid_mask\n Xi = Xi.copy()\n+ if valid_mask.any(): # if there are any valid_mask\n+ unseen = np.unique(Xi[~valid_mask])\n+ if len(str(unseen[0])) > Xi.itemsize:\n+ # convert array to dtype object if the unseen label can't fit in\n+ Xi = Xi.astype(object)\n Xi[~valid_mask] = self.categories_[i][0]\n _, encoded = _encode(Xi, self.categories_[i], encode=True)\n X_int[:, i] = encoded\ndiff --git a/sklearn/preprocessing/label.py b/sklearn/preprocessing/label.py\nindex 12f95b5e2..cf950c27c 100644\n--- a/sklearn/preprocessing/label.py\n+++ b/sklearn/preprocessing/label.py\n@@ -47,10 +47,10 @@ def _encode_numpy(values, uniques=None, encode=False):\n # unique sorts\n return np.unique(values)\n if encode:\n- diff = _encode_check_unknown(values, uniques)\n- if diff:\n- raise ValueError(\"y contains previously unseen labels: %s\"\n- % str(diff))\n+ # Get the mask of valid (already seen) labels\n+ valid_mask = np.in1d(values, uniques)\n+ # For invalid (unseen) labels, set them to an existing category (the first one in this case)\n+ values[~valid_mask] = uniques[0] if len(uniques) > 0 else values[0]\n encoded = np.searchsorted(uniques, values)\n return uniques, encoded\n else:\n" + "name": "20240820_honeycomb", + "updated_spans": { + "requests/adapters.py": [ + "docstring", + "imports", + "HTTPAdapter.send" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "docstring", + "imports", + "HTTPAdapter.send" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sklearn/preprocessing/_encoders.py b/sklearn/preprocessing/_encoders.py\nindex 1e05aba59..ee20bd2ef 100644\n--- a/sklearn/preprocessing/_encoders.py\n+++ b/sklearn/preprocessing/_encoders.py\n@@ -111,6 +111,9 @@ class _BaseEncoder(BaseEstimator, TransformerMixin):\n # removed later.\n X_mask[:, i] = valid_mask\n Xi = Xi.copy()\n+ if Xi.dtype.kind in 'US' and self.categories_[i].dtype.kind in 'US':\n+ # Cast to object to avoid truncation if necessary\n+ Xi = Xi.astype('O')\n Xi[~valid_mask] = self.categories_[i][0]\n _, encoded = _encode(Xi, self.categories_[i], encode=True)\n X_int[:, i] = encoded\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-15609", - "repo": "sympy/sympy", - "base_commit": "15f56f3b0006d2ed2c29bde3c43e91618012c849", - "problem_statement": "Indexed matrix-expression LaTeX printer is not compilable\n```python\r\ni, j, k = symbols(\"i j k\")\r\nM = MatrixSymbol(\"M\", k, k)\r\nN = MatrixSymbol(\"N\", k, k)\r\nlatex((M*N)[i, j])\r\n```\r\n\r\nThe LaTeX string produced by the last command is:\r\n```\r\n\\sum_{i_{1}=0}^{k - 1} M_{i, 
_i_1} N_{_i_1, j}\r\n```\r\nLaTeX complains about a double subscript `_`. This expression won't render in MathJax either.\n", - "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -1438,7 +1438,10 @@ def _print_MatrixBase(self, expr):\n \n def _print_MatrixElement(self, expr):\n return self.parenthesize(expr.parent, PRECEDENCE[\"Atom\"], strict=True) \\\n- + '_{%s, %s}' % (expr.i, expr.j)\n+ + '_{%s, %s}' % (\n+ self._print(expr.i),\n+ self._print(expr.j)\n+ )\n \n def _print_MatrixSlice(self, expr):\n def latexslice(x):\n", - "expected_spans": { - "sympy/printing/latex.py": [ - "LatexPrinter._print_MatrixElement" - ] - }, - "resolved_by": [ + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "requests/adapters.py": [ + "HTTPAdapter", + "HTTPAdapter.send" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "HTTPAdapter", + "HTTPAdapter.send" + ] + } + }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex 901918674a..7480e736f6 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -1437,8 +1437,8 @@ def _print_MatrixBase(self, expr):\n = _print_MatrixBase\n \n def _print_MatrixElement(self, expr):\n- return self.parenthesize(expr.parent, PRECEDENCE[\"Atom\"], strict=True) \\\n- + '_{%s, %s}' % (expr.i, expr.j)\n+ i, j = map(self._print, (expr.i, expr.j))\n+ return self.parenthesize(expr.parent, PRECEDENCE[\"Atom\"], strict=True) + '_{%s, %s}' % (i, j)\n \n def _print_MatrixSlice(self, expr):\n def latexslice(x):\ndiff --git a/test_latex_output.py b/test_latex_output.py\nnew file mode 100644\nindex 0000000000..fa7b20352e\n--- /dev/null\n+++ b/test_latex_output.py\n@@ -0,0 +1,8 @@\n+from sympy import symbols, MatrixSymbol, latex\n+\n+i, j, k = symbols(\"i j k\")\n+M = MatrixSymbol(\"M\", k, k)\n+N = MatrixSymbol(\"N\", k, k)\n+latex_str = latex((M*N)[i, j])\n+\n+print(\"LaTeX output:\", latex_str)\n" + "name": "20240908_infant_gpt4o", + "updated_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.send" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.send" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\nindex 901918674a..7480e736f6 100644\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -1437,8 +1437,8 @@ def _print_MatrixBase(self, expr):\n = _print_MatrixBase\n \n def _print_MatrixElement(self, expr):\n- return self.parenthesize(expr.parent, PRECEDENCE[\"Atom\"], strict=True) \\\n- + '_{%s, %s}' % (expr.i, expr.j)\n+ i, j = map(self._print, (expr.i, expr.j))\n+ return self.parenthesize(expr.parent, PRECEDENCE[\"Atom\"], strict=True) + '_{%s, %s}' % (i, j)\n \n def _print_MatrixSlice(self, expr):\n def latexslice(x):\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-15678", - "repo": "sympy/sympy", - "base_commit": "31c68eef3ffef39e2e792b0ec92cd92b7010eb2a", - "problem_statement": "Some issues with idiff\nidiff doesn't support Eq, and it also doesn't support f(x) instead of y. 
Both should be easy to correct.\r\n\r\n```\r\n>>> idiff(Eq(y*exp(y), x*exp(x)), y, x)\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"./sympy/geometry/util.py\", line 582, in idiff\r\n yp = solve(eq.diff(x), dydx)[0].subs(derivs)\r\nIndexError: list index out of range\r\n>>> idiff(f(x)*exp(f(x)) - x*exp(x), f(x), x)\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"./sympy/geometry/util.py\", line 574, in idiff\r\n raise ValueError(\"expecting x-dependent symbol(s) but got: %s\" % y)\r\nValueError: expecting x-dependent symbol(s) but got: f(x)\r\n>>> idiff(y*exp(y)- x*exp(x), y, x)\r\n(x + 1)*exp(x - y)/(y + 1)\r\n```\n", - "golden_patch": "diff --git a/sympy/geometry/util.py b/sympy/geometry/util.py\n--- a/sympy/geometry/util.py\n+++ b/sympy/geometry/util.py\n@@ -570,12 +570,19 @@ def idiff(eq, y, x, n=1):\n y = y[0]\n elif isinstance(y, Symbol):\n dep = {y}\n+ elif isinstance(y, Function):\n+ pass\n else:\n- raise ValueError(\"expecting x-dependent symbol(s) but got: %s\" % y)\n+ raise ValueError(\"expecting x-dependent symbol(s) or function(s) but got: %s\" % y)\n \n f = dict([(s, Function(\n s.name)(x)) for s in eq.free_symbols if s != x and s in dep])\n- dydx = Function(y.name)(x).diff(x)\n+\n+ if isinstance(y, Symbol):\n+ dydx = Function(y.name)(x).diff(x)\n+ else:\n+ dydx = y.diff(x)\n+\n eq = eq.subs(f)\n derivs = {}\n for i in range(n):\n", - "expected_spans": { - "sympy/geometry/util.py": [ - "idiff" - ] - }, - "resolved_by": [ + "name": "20240829_Isoform", + "updated_spans": { + "requests/adapters.py": [ + "HTTPAdapter.send" + ], + "requests/models.py": [ + "Response.iter_content", + "Response.json" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "HTTPAdapter.send" + ] + } + }, { - "name": "20240523_aider", - "patch": "diff --git a/sympy/geometry/util.py b/sympy/geometry/util.py\nindex d32adc3066..18a541fc2c 100644\n--- a/sympy/geometry/util.py\n+++ b/sympy/geometry/util.py\n@@ -17,6 +17,8 @@\n is_sequence, range, string_types, ordered)\n from sympy.core.containers import OrderedSet\n from .point import Point, Point2D\n+from sympy.geometry.entity import GeometryEntity\n+from sympy.functions.elementary.miscellaneous import sqrt\n \n \n def find(x, equation):\n@@ -103,7 +105,7 @@ def are_coplanar(*e):\n pt3d.append(i)\n elif isinstance(i, LinearEntity3D):\n pt3d.extend(i.args)\n- elif isinstance(i, GeometryEntity): # XXX we should have a GeometryEntity3D class so we can tell the difference between 2D and 3D -- here we just want to deal with 2D objects; if new 3D objects are encountered that we didn't hanlde above, an error should be raised\n+ elif isinstance(i, GeometryEntity): # XXX we should have a GeometryEntity3D class so we can tell the difference between 2D and 3D -- here we just want to deal with 2D objects; if new 3D objects are encountered that we didn't handle above, an error should be raised\n # all 2D objects have some Point that defines them; so convert those points to 3D pts by making z=0\n for p in i.args:\n if isinstance(p, Point):\n@@ -541,7 +543,8 @@ def idiff(eq, y, x, n=1):\n >>> from sympy.abc import x, y, a\n >>> from sympy.geometry.util import idiff\n \n- >>> circ = x**2 + y**2 - 4\n+ >>> from sympy import Eq\n+ >>> circ = Eq(x**2 + y**2, 4)\n >>> idiff(circ, y, x)\n -x/y\n >>> idiff(circ, y, x, 2).simplify()\n@@ -565,17 +568,22 @@ def idiff(eq, y, x, n=1):\n sympy.core.function.diff: explicitly differentiates wrt symbols\n \n \"\"\"\n+ from sympy import Eq\n+\n+ if isinstance(eq, Eq):\n+ 
eq = eq.lhs - eq.rhs\n+\n if is_sequence(y):\n dep = set(y)\n y = y[0]\n- elif isinstance(y, Symbol):\n+ elif isinstance(y, (Symbol, Function)):\n dep = {y}\n else:\n raise ValueError(\"expecting x-dependent symbol(s) but got: %s\" % y)\n \n f = dict([(s, Function(\n s.name)(x)) for s in eq.free_symbols if s != x and s in dep])\n- dydx = Function(y.name)(x).diff(x)\n+ dydx = Function(y.name)(x).diff(x) if isinstance(y, Symbol) else y.diff(x)\n eq = eq.subs(f)\n derivs = {}\n for i in range(n):\n" + "name": "20240622_Lingma_Agent", + "updated_spans": { + "requests/adapters.py": [ + "HTTPAdapter.send" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "HTTPAdapter.send" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sympy/geometry/util.py b/sympy/geometry/util.py\nindex d32adc3066..c50a88b10e 100644\n--- a/sympy/geometry/util.py\n+++ b/sympy/geometry/util.py\n@@ -525,6 +525,8 @@ def hypot(x, y):\n return set(rv)\n \n \n+from sympy import Eq\n+\n def idiff(eq, y, x, n=1):\n \"\"\"Return ``dy/dx`` assuming that ``eq == 0``.\n \n@@ -565,17 +567,20 @@ def idiff(eq, y, x, n=1):\n sympy.core.function.diff: explicitly differentiates wrt symbols\n \n \"\"\"\n+ if isinstance(eq, Eq):\n+ eq = eq.lhs - eq.rhs\n+\n if is_sequence(y):\n dep = set(y)\n y = y[0]\n- elif isinstance(y, Symbol):\n+ elif isinstance(y, (Symbol, Function)):\n dep = {y}\n else:\n raise ValueError(\"expecting x-dependent symbol(s) but got: %s\" % y)\n \n f = dict([(s, Function(\n s.name)(x)) for s in eq.free_symbols if s != x and s in dep])\n- dydx = Function(y.name)(x).diff(x)\n+ dydx = y.diff(x) if isinstance(y, Function) else Function(y.name)(x).diff(x)\n eq = eq.subs(f)\n derivs = {}\n for i in range(n):\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "django__django-10914", - "repo": "django/django", - "base_commit": "e7fd69d051eaa67cb17f172a39b57253e9cb831a", - "problem_statement": "Set default FILE_UPLOAD_PERMISSION to 0o644.\nDescription\n\t\nHello,\nAs far as I can see, the \u200bFile Uploads documentation page does not mention any permission issues.\nWhat I would like to see is a warning that in absence of explicitly configured FILE_UPLOAD_PERMISSIONS, the permissions for a file uploaded to FileSystemStorage might not be consistent depending on whether a MemoryUploadedFile or a TemporaryUploadedFile was used for temporary storage of the uploaded data (which, with the default FILE_UPLOAD_HANDLERS, in turn depends on the uploaded data size).\nThe tempfile.NamedTemporaryFile + os.rename sequence causes the resulting file permissions to be 0o0600 on some systems (I experience it here on CentOS 7.4.1708 and Python 3.6.5). In all probability, the implementation of Python's built-in tempfile module explicitly sets such permissions for temporary files due to security considerations.\nI found mentions of this issue \u200bon GitHub, but did not manage to find any existing bug report in Django's bug tracker.\n", - "golden_patch": "diff --git a/django/conf/global_settings.py b/django/conf/global_settings.py\n--- a/django/conf/global_settings.py\n+++ b/django/conf/global_settings.py\n@@ -304,7 +304,7 @@ def gettext_noop(s):\n \n # The numeric mode to set newly-uploaded files to. 
The value should be a mode\n # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.\n-FILE_UPLOAD_PERMISSIONS = None\n+FILE_UPLOAD_PERMISSIONS = 0o644\n \n # The numeric mode to assign to newly-created directories, when uploading files.\n # The value should be a mode as you'd pass to os.chmod;\n", - "expected_spans": { - "django/conf/global_settings.py": [ - "impl:105" - ] - }, - "resolved_by": [ + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "requests/adapters.py": [ + "docstring", + "HTTPAdapter.send" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "docstring", + "HTTPAdapter.send" + ] + } + }, { "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/django/conf/global_settings.py b/django/conf/global_settings.py\nindex acee9887d2..bdeec80610 100644\n--- a/django/conf/global_settings.py\n+++ b/django/conf/global_settings.py\n@@ -304,7 +304,7 @@ FILE_UPLOAD_TEMP_DIR = None\n \n # The numeric mode to set newly-uploaded files to. The value should be a mode\n # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.\n-FILE_UPLOAD_PERMISSIONS = None\n+FILE_UPLOAD_PERMISSIONS = 0o644\n \n # The numeric mode to assign to newly-created directories, when uploading files.\n # The value should be a mode as you'd pass to os.chmod;\n" + "updated_spans": {}, + "alternative_spans": {} }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/django/conf/global_settings.py b/django/conf/global_settings.py\nindex acee9887d2..bdeec80610 100644\n--- a/django/conf/global_settings.py\n+++ b/django/conf/global_settings.py\n@@ -304,7 +304,7 @@ FILE_UPLOAD_TEMP_DIR = None\n \n # The numeric mode to set newly-uploaded files to. The value should be a mode\n # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.\n-FILE_UPLOAD_PERMISSIONS = None\n+FILE_UPLOAD_PERMISSIONS = 0o644\n \n # The numeric mode to assign to newly-created directories, when uploading files.\n # The value should be a mode as you'd pass to os.chmod;\ndiff --git a/docs/topics/http/file-uploads.txt b/docs/topics/http/file-uploads.txt\nindex 21a6f06853..c398282cf7 100644\n--- a/docs/topics/http/file-uploads.txt\n+++ b/docs/topics/http/file-uploads.txt\n@@ -16,7 +16,9 @@ and in memory, and how to customize the default behavior.\n untrusted users! See the security guide's topic on\n :ref:`user-uploaded-content-security` for mitigation details.\n \n-Basic file uploads\n+.. warning::\n+ In the absence of explicitly configured FILE_UPLOAD_PERMISSIONS, the permissions for a file uploaded to FileSystemStorage might not be consistent. This is due to the use of different temporary storage mechanisms for uploaded data, which can result in different file permissions. By default, Django now sets FILE_UPLOAD_PERMISSIONS to 0o644 to ensure consistent permissions for all uploaded files. 
If you require different permissions, you can set FILE_UPLOAD_PERMISSIONS in your settings.\n+\n ==================\n \n Consider a simple form containing a :class:`~django.forms.FileField`:\n" + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.get_connection", + "HTTPAdapter.send", + "HTTPAdapter" + ], + "requests/exceptions.py": [ + "docstring" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.get_connection", + "HTTPAdapter.send", + "HTTPAdapter" + ] + } }, { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/django/conf/global_settings.py\n+++ b/django/conf/global_settings.py\n@@ -301,13 +301,13 @@\n # `None` will make Django use the operating system's default temporary directory\n # (i.e. \"/tmp\" on *nix systems).\n FILE_UPLOAD_TEMP_DIR = None\n \n # The numeric mode to set newly-uploaded files to. The value should be a mode\n # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.\n-FILE_UPLOAD_PERMISSIONS = None\n+FILE_UPLOAD_PERMISSIONS = 0o644\n \n # The numeric mode to assign to newly-created directories, when uploading files.\n # The value should be a mode as you'd pass to os.chmod;\n # see https://docs.python.org/library/os.html#files-and-directories.\n FILE_UPLOAD_DIRECTORY_PERMISSIONS = None\n \n" + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "requests/adapters.py": [ + "imports" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "imports" + ] + } }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/django/conf/global_settings.py\n+++ b/django/conf/global_settings.py\n@@ -304,7 +304,7 @@\n \n # The numeric mode to set newly-uploaded files to. The value should be a mode\n # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.\n-FILE_UPLOAD_PERMISSIONS = None\n+FILE_UPLOAD_PERMISSIONS = 0o644\n \n # The numeric mode to assign to newly-created directories, when uploading files.\n # The value should be a mode as you'd pass to os.chmod;\n" + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "requests/adapters.py": [ + "HTTPAdapter.send" + ], + "requests/models.py": [ + "Response.iter_content", + "Response.json" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "HTTPAdapter.send" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/conf/global_settings.py b/django/conf/global_settings.py\nindex acee9887d2..bdeec80610 100644\n--- a/django/conf/global_settings.py\n+++ b/django/conf/global_settings.py\n@@ -304,7 +304,7 @@ FILE_UPLOAD_TEMP_DIR = None\n \n # The numeric mode to set newly-uploaded files to. 
The value should be a mode\n # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.\n-FILE_UPLOAD_PERMISSIONS = None\n+FILE_UPLOAD_PERMISSIONS = 0o644\n \n # The numeric mode to assign to newly-created directories, when uploading files.\n # The value should be a mode as you'd pass to os.chmod;\n" + "name": "20240617_factory_code_droid", + "updated_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.send" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.send" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "diff --git a/django/conf/global_settings.py b/django/conf/global_settings.py\nindex acee9887d2..bdeec80610 100644\n--- a/django/conf/global_settings.py\n+++ b/django/conf/global_settings.py\n@@ -304,7 +304,7 @@ FILE_UPLOAD_TEMP_DIR = None\n \n # The numeric mode to set newly-uploaded files to. The value should be a mode\n # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.\n-FILE_UPLOAD_PERMISSIONS = None\n+FILE_UPLOAD_PERMISSIONS = 0o644\n \n # The numeric mode to assign to newly-created directories, when uploading files.\n # The value should be a mode as you'd pass to os.chmod;\ndiff --git a/docs/topics/http/file-uploads.txt b/docs/topics/http/file-uploads.txt\nindex 21a6f06853..c398282cf7 100644\n--- a/docs/topics/http/file-uploads.txt\n+++ b/docs/topics/http/file-uploads.txt\n@@ -16,7 +16,9 @@ and in memory, and how to customize the default behavior.\n untrusted users! See the security guide's topic on\n :ref:`user-uploaded-content-security` for mitigation details.\n \n-Basic file uploads\n+.. warning::\n+ In the absence of explicitly configured FILE_UPLOAD_PERMISSIONS, the permissions for a file uploaded to FileSystemStorage might not be consistent. This is due to the use of different temporary storage mechanisms for uploaded data, which can result in different file permissions. By default, Django now sets FILE_UPLOAD_PERMISSIONS to 0o644 to ensure consistent permissions for all uploaded files. If you require different permissions, you can set FILE_UPLOAD_PERMISSIONS in your settings.\n+\n ==================\n \n Consider a simple form containing a :class:`~django.forms.FileField`:\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "django__django-10924", - "repo": "django/django", - "base_commit": "bceadd2788dc2dad53eba0caae172bd8522fd483", - "problem_statement": "Allow FilePathField path to accept a callable.\nDescription\n\t\nI have a special case where I want to create a model containing the path to some local files on the server/dev machine. Seeing as the place where these files are stored is different on different machines I have the following:\nimport os\nfrom django.conf import settings\nfrom django.db import models\nclass LocalFiles(models.Model):\n\tname = models.CharField(max_length=255)\n\tfile = models.FilePathField(path=os.path.join(settings.LOCAL_FILE_DIR, 'example_dir'))\nNow when running manage.py makemigrations it will resolve the path based on the machine it is being run on. 
Eg: /home//server_files/example_dir\nI had to manually change the migration to include the os.path.join() part to not break this when running the migration on production/other machine.\n", - "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -1709,7 +1709,7 @@ def get_prep_value(self, value):\n \n def formfield(self, **kwargs):\n return super().formfield(**{\n- 'path': self.path,\n+ 'path': self.path() if callable(self.path) else self.path,\n 'match': self.match,\n 'recursive': self.recursive,\n 'form_class': forms.FilePathField,\n", - "expected_spans": { - "django/db/models/fields/__init__.py": [ - "FilePathField.formfield" - ] - }, - "resolved_by": [ - { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\nindex 0c64748ccd..f9245c5cb4 100644\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -1663,31 +1663,20 @@ class FilePathField(Field):\n \n def __init__(self, verbose_name=None, name=None, path='', match=None,\n recursive=False, allow_files=True, allow_folders=False, **kwargs):\n- self.path, self.match, self.recursive = path, match, recursive\n+ # If path is a callable, store it directly without calling.\n+ # Otherwise, store the string path.\n+ self.path = path if callable(path) else path\n+ self.match, self.recursive = match, recursive\n self.allow_files, self.allow_folders = allow_files, allow_folders\n kwargs.setdefault('max_length', 100)\n super().__init__(verbose_name, name, **kwargs)\n \n- def check(self, **kwargs):\n- return [\n- *super().check(**kwargs),\n- *self._check_allowing_files_or_folders(**kwargs),\n- ]\n-\n- def _check_allowing_files_or_folders(self, **kwargs):\n- if not self.allow_files and not self.allow_folders:\n- return [\n- checks.Error(\n- \"FilePathFields must have either 'allow_files' or 'allow_folders' set to True.\",\n- obj=self,\n- id='fields.E140',\n- )\n- ]\n- return []\n-\n def deconstruct(self):\n name, path, args, kwargs = super().deconstruct()\n- if self.path != '':\n+ # If path is a callable, deconstruct it to a string representation.\n+ if callable(self.path):\n+ kwargs['path'] = '{}.{}'.format(self.path.__module__, self.path.__qualname__)\n+ elif self.path != '':\n kwargs['path'] = self.path\n if self.match is not None:\n kwargs['match'] = self.match\n@@ -1700,6 +1689,17 @@ class FilePathField(Field):\n if kwargs.get(\"max_length\") == 100:\n del kwargs[\"max_length\"]\n return name, path, args, kwargs\n+ if self.match is not None:\n+ kwargs['match'] = self.match\n+ if self.recursive is not False:\n+ kwargs['recursive'] = self.recursive\n+ if self.allow_files is not True:\n+ kwargs['allow_files'] = self.allow_files\n+ if self.allow_folders is not False:\n+ kwargs['allow_folders'] = self.allow_folders\n+ if kwargs.get(\"max_length\") == 100:\n+ del kwargs[\"max_length\"]\n+ return name, path, args, kwargs\n \n def get_prep_value(self, value):\n value = super().get_prep_value(value)\n@@ -1708,8 +1708,10 @@ class FilePathField(Field):\n return str(value)\n \n def formfield(self, **kwargs):\n+ # Evaluate callable path if necessary\n+ path = self.path() if callable(self.path) else self.path\n return super().formfield(**{\n- 'path': self.path,\n+ 'path': path,\n 'match': self.match,\n 'recursive': self.recursive,\n 'form_class': forms.FilePathField,\ndiff --git 
a/jedi_info_model_fields.test_filepathfield.FilePathFieldTests.txt b/jedi_info_model_fields.test_filepathfield.FilePathFieldTests.txt\nnew file mode 100644\nindex 0000000000..5e7b69ff45\n--- /dev/null\n+++ b/jedi_info_model_fields.test_filepathfield.FilePathFieldTests.txt\n@@ -0,0 +1,2 @@\n+Could not find information for model_fields.test_filepathfield.FilePathFieldTests.test_valid_directory\n+Trying model_fields.test_filepathfield.FilePathFieldTests\n\\ No newline at end of file\ndiff --git a/jedi_info_model_fields.test_filepathfield.txt b/jedi_info_model_fields.test_filepathfield.txt\nnew file mode 100644\nindex 0000000000..ef308392fa\n--- /dev/null\n+++ b/jedi_info_model_fields.test_filepathfield.txt\n@@ -0,0 +1,2 @@\n+Could not find information for model_fields.test_filepathfield.FilePathFieldTests\n+Trying model_fields.test_filepathfield\n\\ No newline at end of file\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-13142", - "repo": "scikit-learn/scikit-learn", - "base_commit": "1c8668b0a021832386470ddf740d834e02c66f69", - "problem_statement": "GaussianMixture predict and fit_predict disagree when n_init>1\n#### Description\r\nWhen `n_init` is specified in GaussianMixture, the results of fit_predict(X) and predict(X) are often different. The `test_gaussian_mixture_fit_predict` unit test doesn't catch this because it does not set `n_init`.\r\n\r\n#### Steps/Code to Reproduce\r\n```\r\npython\r\nfrom sklearn.mixture import GaussianMixture\r\nfrom sklearn.utils.testing import assert_array_equal\r\nimport numpy\r\nX = numpy.random.randn(1000,5)\r\nprint 'no n_init'\r\ngm = GaussianMixture(n_components=5)\r\nc1 = gm.fit_predict(X)\r\nc2 = gm.predict(X)\r\nassert_array_equal(c1,c2)\r\nprint 'n_init=5'\r\ngm = GaussianMixture(n_components=5, n_init=5)\r\nc1 = gm.fit_predict(X)\r\nc2 = gm.predict(X)\r\nassert_array_equal(c1,c2)\r\n```\r\n\r\n#### Expected Results\r\n```\r\nno n_init\r\nn_init=5\r\n```\r\nNo exceptions.\r\n\r\n#### Actual Results\r\n```\r\nno n_init\r\nn_init=5\r\nTraceback (most recent call last):\r\n File \"test_gm.py\", line 17, in \r\n assert_array_equal(c1,c2)\r\n File \"/home/scott/.local/lib/python2.7/site-packages/numpy/testing/_private/utils.py\", line 872, in assert_array_equal\r\n verbose=verbose, header='Arrays are not equal')\r\n File \"/home/scott/.local/lib/python2.7/site-packages/numpy/testing/_private/utils.py\", line 796, in assert_array_compare\r\n raise AssertionError(msg)\r\nAssertionError: \r\nArrays are not equal\r\n\r\n(mismatch 88.6%)\r\n x: array([4, 0, 1, 1, 1, 3, 3, 4, 4, 2, 0, 0, 1, 2, 0, 2, 0, 1, 3, 1, 1, 3,\r\n 2, 1, 0, 2, 1, 0, 2, 0, 3, 1, 2, 3, 3, 1, 0, 2, 2, 0, 3, 0, 2, 0,\r\n 4, 2, 3, 0, 4, 2, 4, 1, 0, 2, 2, 1, 3, 2, 1, 4, 0, 2, 2, 1, 1, 2,...\r\n y: array([4, 1, 0, 2, 2, 1, 1, 4, 4, 0, 4, 1, 0, 3, 1, 0, 2, 2, 1, 2, 0, 0,\r\n 1, 0, 4, 1, 0, 4, 0, 1, 1, 2, 3, 1, 4, 0, 1, 4, 4, 4, 0, 1, 0, 2,\r\n 4, 1, 1, 2, 4, 3, 4, 0, 2, 3, 2, 3, 0, 0, 2, 3, 3, 3, 3, 0, 3, 2,...\r\n```\r\n\r\n#### Versions\r\n```\r\nSystem:\r\n python: 2.7.15rc1 (default, Nov 12 2018, 14:31:15) [GCC 7.3.0]\r\n machine: Linux-4.15.0-43-generic-x86_64-with-Ubuntu-18.04-bionic\r\nexecutable: /usr/bin/python\r\n\r\nBLAS:\r\n macros: HAVE_CBLAS=None, NO_ATLAS_INFO=-1\r\ncblas_libs: cblas\r\n lib_dirs: /usr/lib/x86_64-linux-gnu\r\n\r\nPython deps:\r\n Cython: 0.28.5\r\n scipy: 1.2.0\r\nsetuptools: 39.0.1\r\n pip: 19.0.1\r\n numpy: 1.16.0\r\n pandas: 0.23.1\r\n sklearn: 0.20.2\r\n```\n", - "golden_patch": "diff --git 
a/sklearn/mixture/base.py b/sklearn/mixture/base.py\n--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -257,11 +257,6 @@ def fit_predict(self, X, y=None):\n best_params = self._get_parameters()\n best_n_iter = n_iter\n \n- # Always do a final e-step to guarantee that the labels returned by\n- # fit_predict(X) are always consistent with fit(X).predict(X)\n- # for any value of max_iter and tol (and any random_state).\n- _, log_resp = self._e_step(X)\n-\n if not self.converged_:\n warnings.warn('Initialization %d did not converge. '\n 'Try different init parameters, '\n@@ -273,6 +268,11 @@ def fit_predict(self, X, y=None):\n self.n_iter_ = best_n_iter\n self.lower_bound_ = max_lower_bound\n \n+ # Always do a final e-step to guarantee that the labels returned by\n+ # fit_predict(X) are always consistent with fit(X).predict(X)\n+ # for any value of max_iter and tol (and any random_state).\n+ _, log_resp = self._e_step(X)\n+\n return log_resp.argmax(axis=1)\n \n def _e_step(self, X):\n", - "expected_spans": { - "sklearn/mixture/base.py": [ - "BaseMixture.fit_predict" - ] - }, - "resolved_by": [ - { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -273,7 +273,8 @@\n self.n_iter_ = best_n_iter\n self.lower_bound_ = max_lower_bound\n \n- return log_resp.argmax(axis=1)\n+ self._fit_predict_labels = log_resp.argmax(axis=1)\n+ return self._fit_predict_labels\n \n def _e_step(self, X):\n \"\"\"E step.\n@@ -371,6 +372,10 @@\n \"\"\"\n self._check_is_fitted()\n X = _check_X(X, None, self.means_.shape[1])\n+ if self._fit_predict_labels is not None:\n+ labels = self._fit_predict_labels\n+ self._fit_predict_labels = None\n+ return labels\n return self._estimate_weighted_log_prob(X).argmax(axis=1)\n \n def predict_proba(self, X):\n" - }, - { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/sklearn/mixture/base.py b/sklearn/mixture/base.py\nindex bd34333c0..09b778caa 100644\n--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -260,19 +260,20 @@ class BaseMixture(DensityMixin, BaseEstimator, metaclass=ABCMeta):\n # Always do a final e-step to guarantee that the labels returned by\n # fit_predict(X) are always consistent with fit(X).predict(X)\n # for any value of max_iter and tol (and any random_state).\n+ # Set the best parameters before the final e-step to ensure consistency\n+ self._set_parameters(best_params)\n+ self.n_iter_ = best_n_iter\n+ self.lower_bound_ = max_lower_bound\n+ \n _, log_resp = self._e_step(X)\n-\n+ \n if not self.converged_:\n warnings.warn('Initialization %d did not converge. '\n 'Try different init parameters, '\n 'or increase max_iter, tol '\n 'or check for degenerate data.'\n % (init + 1), ConvergenceWarning)\n-\n- self._set_parameters(best_params)\n- self.n_iter_ = best_n_iter\n- self.lower_bound_ = max_lower_bound\n-\n+ \n return log_resp.argmax(axis=1)\n \n def _e_step(self, X):\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11001", - "repo": "django/django", - "base_commit": "ef082ebb84f00e38af4e8880d04e8365c2766d34", - "problem_statement": "Incorrect removal of order_by clause created as multiline RawSQL\nDescription\n\t\nHi.\nThe SQLCompiler is ripping off one of my \"order by\" clause, because he \"thinks\" the clause was already \"seen\" (in SQLCompiler.get_order_by()). I'm using expressions written as multiline RawSQLs, which are similar but not the same. 
\nThe bug is located in SQLCompiler.get_order_by(), somewhere around line computing part of SQL query without ordering:\nwithout_ordering = self.ordering_parts.search(sql).group(1)\nThe sql variable contains multiline sql. As a result, the self.ordering_parts regular expression is returning just a line containing ASC or DESC words. This line is added to seen set, and because my raw queries have identical last lines, only the first clasue is returing from SQLCompiler.get_order_by().\nAs a quick/temporal fix I can suggest making sql variable clean of newline characters, like this:\nsql_oneline = ' '.join(sql.split('\\n'))\nwithout_ordering = self.ordering_parts.search(sql_oneline).group(1)\nNote: beware of unicode (Py2.x u'') and EOL dragons (\\r).\nExample of my query:\n\treturn MyModel.objects.all().order_by(\n\t\tRawSQL('''\n\t\t\tcase when status in ('accepted', 'verification')\n\t\t\t\t then 2 else 1 end''', []).desc(),\n\t\tRawSQL('''\n\t\t\tcase when status in ('accepted', 'verification')\n\t\t\t\t then (accepted_datetime, preferred_datetime)\n\t\t\t\t else null end''', []).asc(),\n\t\tRawSQL('''\n\t\t\tcase when status not in ('accepted', 'verification')\n\t\t\t\t then (accepted_datetime, preferred_datetime, created_at)\n\t\t\t\t else null end''', []).desc())\nThe ordering_parts.search is returing accordingly:\n'\t\t\t\t then 2 else 1 end)'\n'\t\t\t\t else null end'\n'\t\t\t\t else null end'\nSecond RawSQL with a\t\t\t\t else null end part is removed from query.\nThe fun thing is that the issue can be solved by workaround by adding a space or any other char to the last line. \nSo in case of RawSQL I can just say, that current implementation of avoiding duplicates in order by clause works only for special/rare cases (or does not work in all cases). \nThe bug filed here is about wrong identification of duplicates (because it compares only last line of SQL passed to order by clause).\nHope my notes will help you fixing the issue. Sorry for my english.\n", - "golden_patch": "diff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py\n--- a/django/db/models/sql/compiler.py\n+++ b/django/db/models/sql/compiler.py\n@@ -32,7 +32,8 @@ def __init__(self, query, connection, using):\n self.select = None\n self.annotation_col_map = None\n self.klass_info = None\n- self.ordering_parts = re.compile(r'(.*)\\s(ASC|DESC)(.*)')\n+ # Multiline ordering SQL clause may appear from RawSQL.\n+ self.ordering_parts = re.compile(r'^(.*)\\s(ASC|DESC)(.*)', re.MULTILINE | re.DOTALL)\n self._meta_ordering = None\n \n def setup_query(self):\n", - "expected_spans": { - "django/db/models/sql/compiler.py": [ - "SQLCompiler" - ] - }, - "resolved_by": [ - { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/django/db/models/sql/compiler.py\n+++ b/django/db/models/sql/compiler.py\n@@ -349,11 +349,12 @@\n else:\n raise DatabaseError('ORDER BY term does not match any column in the result set.')\n sql, params = self.compile(resolved)\n+ sql_oneline = ' '.join(sql.splitlines())\n # Don't add the same column twice, but the order direction is\n # not taken into account so we strip it. 
When this entire method\n # is refactored into expressions, then we can check each part as we\n # generate it.\n- without_ordering = self.ordering_parts.search(sql).group(1)\n+ without_ordering = self.ordering_parts.search(sql_oneline).group(1)\n params_hash = make_hashable(params)\n if (without_ordering, params_hash) in seen:\n continue\n@@ -366,7 +367,8 @@\n if self.query.distinct and not self.query.distinct_fields:\n select_sql = [t[1] for t in select]\n for expr, (sql, params, is_ref) in order_by:\n- without_ordering = self.ordering_parts.search(sql).group(1)\n+ sql_oneline = ' '.join(sql.splitlines())\n+ without_ordering = self.ordering_parts.search(sql_oneline).group(1)\n if not is_ref and (without_ordering, params) not in select_sql:\n extra_select.append((expr, (without_ordering, params), None))\n return extra_select\n" - }, - { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py\nindex 7649c39262..60073295ee 100644\n--- a/django/db/models/sql/compiler.py\n+++ b/django/db/models/sql/compiler.py\n@@ -353,7 +353,7 @@ class SQLCompiler:\n # not taken into account so we strip it. When this entire method\n # is refactored into expressions, then we can check each part as we\n # generate it.\n- without_ordering = self.ordering_parts.search(sql).group(1)\n+ without_ordering = self.ordering_parts.search(' '.join(sql.splitlines())).group(1)\n params_hash = make_hashable(params)\n if (without_ordering, params_hash) in seen:\n continue\n@@ -366,7 +366,7 @@ class SQLCompiler:\n if self.query.distinct and not self.query.distinct_fields:\n select_sql = [t[1] for t in select]\n for expr, (sql, params, is_ref) in order_by:\n- without_ordering = self.ordering_parts.search(sql).group(1)\n+ without_ordering = self.ordering_parts.search(' '.join(sql.splitlines())).group(1)\n if not is_ref and (without_ordering, params) not in select_sql:\n extra_select.append((expr, (without_ordering, params), None))\n return extra_select\n" - }, - { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py\nindex 7649c39262..cc117beeee 100644\n--- a/django/db/models/sql/compiler.py\n+++ b/django/db/models/sql/compiler.py\n@@ -353,7 +353,8 @@ class SQLCompiler:\n # not taken into account so we strip it. 
When this entire method\n # is refactored into expressions, then we can check each part as we\n # generate it.\n- without_ordering = self.ordering_parts.search(sql).group(1)\n+ sql_oneline = ' '.join(sql.split('\\n'))\n+ without_ordering = self.ordering_parts.search(sql_oneline).group(1)\n params_hash = make_hashable(params)\n if (without_ordering, params_hash) in seen:\n continue\n@@ -366,7 +367,7 @@ class SQLCompiler:\n if self.query.distinct and not self.query.distinct_fields:\n select_sql = [t[1] for t in select]\n for expr, (sql, params, is_ref) in order_by:\n- without_ordering = self.ordering_parts.search(sql).group(1)\n+ without_ordering = self.ordering_parts.search(' '.join(sql.splitlines())).group(1)\n if not is_ref and (without_ordering, params) not in select_sql:\n extra_select.append((expr, (without_ordering, params), None))\n return extra_select\ndiff --git a/reproduce_bug.py b/reproduce_bug.py\nnew file mode 100644\nindex 0000000000..23944ae9de\n--- /dev/null\n+++ b/reproduce_bug.py\n@@ -0,0 +1,25 @@\n+from django.db.models.expressions import RawSQL\n+from django.db import models\n+\n+class MyModel(models.Model):\n+ status = models.CharField(max_length=10)\n+ accepted_datetime = models.DateTimeField()\n+ preferred_datetime = models.DateTimeField()\n+ created_at = models.DateTimeField()\n+\n+def reproduce_bug():\n+ return MyModel.objects.all().order_by(\n+ RawSQL('''\n+ case when status in ('accepted', 'verification')\n+ then 2 else 1 end''', []).desc(),\n+ RawSQL('''\n+ case when status in ('accepted', 'verification')\n+ then (accepted_datetime, preferred_datetime)\n+ else null end''', []).asc(),\n+ RawSQL('''\n+ case when status not in ('accepted', 'verification')\n+ then (accepted_datetime, preferred_datetime, created_at)\n+ else null end''', []).desc()\n+ )\n+\n+print(\"Script completed successfully, no errors.\")\n" + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "requests/exceptions.py": [ + "docstring" + ], + "requests/packages/urllib3/_collections.py": [ + "imports" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "requests/adapters.py": [ + "imports" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "imports" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.send" + ], + "requests/exceptions.py": [ + "StreamConsumedError" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.send" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "requests/api.py": [ + "imports", + "request" + ], + "requests/exceptions.py": [] + }, + "alternative_spans": {} + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.send" + ] + }, + "alternative_spans": { + "requests/adapters.py": [ + "imports", + "HTTPAdapter.send" + ] + } } ], "alternative_spans": [ { - "run_name": "20240524_opencsg_starship_gpt4", + "run_name": "20240630_agentless_gpt4o", "spans": { - "django/db/models/sql/compiler.py": [ - "SQLCompiler.get_order_by", - "SQLCompiler.get_extra_select" + "requests/adapters.py": [ + "HTTPAdapter", + "HTTPAdapter.send" ] } }, { - "run_name": "20240604_CodeR", + "run_name": "20240623_moatless_claude35sonnet", "spans": { - "django/db/models/sql/compiler.py": [ - "SQLCompiler.get_order_by", - "SQLCompiler.get_extra_select" + "requests/adapters.py": [ + "imports", 
+ "HTTPAdapter.build_response" ] } }, { - "run_name": "20240612_IBM_Research_Agent101", + "run_name": "20240808_RepoGraph_gpt4o", "spans": { - "django/db/models/sql/compiler.py": [ - "SQLCompiler.get_order_by", - "SQLCompiler.get_extra_select" + "requests/adapters.py": [ + "HTTPAdapter", + "HTTPAdapter.send" ] } - } - ] - }, - { - "instance_id": "django__django-11019", - "repo": "django/django", - "base_commit": "93e892bb645b16ebaf287beb5fe7f3ffe8d10408", - "problem_statement": "Merging 3 or more media objects can throw unnecessary MediaOrderConflictWarnings\nDescription\n\t\nConsider the following form definition, where text-editor-extras.js depends on text-editor.js but all other JS files are independent:\nfrom django import forms\nclass ColorPicker(forms.Widget):\n\tclass Media:\n\t\tjs = ['color-picker.js']\nclass SimpleTextWidget(forms.Widget):\n\tclass Media:\n\t\tjs = ['text-editor.js']\nclass FancyTextWidget(forms.Widget):\n\tclass Media:\n\t\tjs = ['text-editor.js', 'text-editor-extras.js', 'color-picker.js']\nclass MyForm(forms.Form):\n\tbackground_color = forms.CharField(widget=ColorPicker())\n\tintro = forms.CharField(widget=SimpleTextWidget())\n\tbody = forms.CharField(widget=FancyTextWidget())\nDjango should be able to resolve the JS files for the final form into the order text-editor.js, text-editor-extras.js, color-picker.js. However, accessing MyForm().media results in:\n/projects/django/django/forms/widgets.py:145: MediaOrderConflictWarning: Detected duplicate Media files in an opposite order:\ntext-editor-extras.js\ntext-editor.js\n MediaOrderConflictWarning,\nMedia(css={}, js=['text-editor-extras.js', 'color-picker.js', 'text-editor.js'])\nThe MediaOrderConflictWarning is a result of the order that the additions happen in: ColorPicker().media + SimpleTextWidget().media produces Media(css={}, js=['color-picker.js', 'text-editor.js']), which (wrongly) imposes the constraint that color-picker.js must appear before text-editor.js.\nThe final result is particularly unintuitive here, as it's worse than the \"na\u00efve\" result produced by Django 1.11 before order-checking was added (color-picker.js, text-editor.js, text-editor-extras.js), and the pair of files reported in the warning message seems wrong too (aren't color-picker.js and text-editor.js the wrong-ordered ones?)\n", - "golden_patch": "diff --git a/django/forms/widgets.py b/django/forms/widgets.py\n--- a/django/forms/widgets.py\n+++ b/django/forms/widgets.py\n@@ -6,16 +6,21 @@\n import datetime\n import re\n import warnings\n+from collections import defaultdict\n from itertools import chain\n \n from django.conf import settings\n from django.forms.utils import to_current_timezone\n from django.templatetags.static import static\n from django.utils import datetime_safe, formats\n+from django.utils.datastructures import OrderedSet\n from django.utils.dates import MONTHS\n from django.utils.formats import get_format\n from django.utils.html import format_html, html_safe\n from django.utils.safestring import mark_safe\n+from django.utils.topological_sort import (\n+ CyclicDependencyError, stable_topological_sort,\n+)\n from django.utils.translation import gettext_lazy as _\n \n from .renderers import get_default_renderer\n@@ -59,22 +64,15 @@ def __str__(self):\n \n @property\n def _css(self):\n- css = self._css_lists[0]\n- # filter(None, ...) 
avoids calling merge with empty dicts.\n- for obj in filter(None, self._css_lists[1:]):\n- css = {\n- medium: self.merge(css.get(medium, []), obj.get(medium, []))\n- for medium in css.keys() | obj.keys()\n- }\n- return css\n+ css = defaultdict(list)\n+ for css_list in self._css_lists:\n+ for medium, sublist in css_list.items():\n+ css[medium].append(sublist)\n+ return {medium: self.merge(*lists) for medium, lists in css.items()}\n \n @property\n def _js(self):\n- js = self._js_lists[0]\n- # filter(None, ...) avoids calling merge() with empty lists.\n- for obj in filter(None, self._js_lists[1:]):\n- js = self.merge(js, obj)\n- return js\n+ return self.merge(*self._js_lists)\n \n def render(self):\n return mark_safe('\\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES)))\n@@ -115,39 +113,37 @@ def __getitem__(self, name):\n raise KeyError('Unknown media type \"%s\"' % name)\n \n @staticmethod\n- def merge(list_1, list_2):\n+ def merge(*lists):\n \"\"\"\n- Merge two lists while trying to keep the relative order of the elements.\n- Warn if the lists have the same two elements in a different relative\n- order.\n+ Merge lists while trying to keep the relative order of the elements.\n+ Warn if the lists have the same elements in a different relative order.\n \n For static assets it can be important to have them included in the DOM\n in a certain order. In JavaScript you may not be able to reference a\n global or in CSS you might want to override a style.\n \"\"\"\n- # Start with a copy of list_1.\n- combined_list = list(list_1)\n- last_insert_index = len(list_1)\n- # Walk list_2 in reverse, inserting each element into combined_list if\n- # it doesn't already exist.\n- for path in reversed(list_2):\n- try:\n- # Does path already exist in the list?\n- index = combined_list.index(path)\n- except ValueError:\n- # Add path to combined_list since it doesn't exist.\n- combined_list.insert(last_insert_index, path)\n- else:\n- if index > last_insert_index:\n- warnings.warn(\n- 'Detected duplicate Media files in an opposite order:\\n'\n- '%s\\n%s' % (combined_list[last_insert_index], combined_list[index]),\n- MediaOrderConflictWarning,\n- )\n- # path already exists in the list. 
Update last_insert_index so\n- # that the following elements are inserted in front of this one.\n- last_insert_index = index\n- return combined_list\n+ dependency_graph = defaultdict(set)\n+ all_items = OrderedSet()\n+ for list_ in filter(None, lists):\n+ head = list_[0]\n+ # The first items depend on nothing but have to be part of the\n+ # dependency graph to be included in the result.\n+ dependency_graph.setdefault(head, set())\n+ for item in list_:\n+ all_items.add(item)\n+ # No self dependencies\n+ if head != item:\n+ dependency_graph[item].add(head)\n+ head = item\n+ try:\n+ return stable_topological_sort(all_items, dependency_graph)\n+ except CyclicDependencyError:\n+ warnings.warn(\n+ 'Detected duplicate Media files in an opposite order: {}'.format(\n+ ', '.join(repr(l) for l in lists)\n+ ), MediaOrderConflictWarning,\n+ )\n+ return list(all_items)\n \n def __add__(self, other):\n combined = Media()\n", - "expected_spans": { - "django/forms/widgets.py": [ - "imports", - "Media._css", - "Media._js", - "Media.merge" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-13241", - "repo": "scikit-learn/scikit-learn", - "base_commit": "f8b108d0c6f2f82b2dc4e32a6793f9d9ac9cf2f4", - "problem_statement": "Differences among the results of KernelPCA with rbf kernel\nHi there,\r\nI met with a problem:\r\n\r\n#### Description\r\nWhen I run KernelPCA for dimension reduction for the same datasets, the results are different in signs.\r\n\r\n#### Steps/Code to Reproduce\r\nJust to reduce the dimension to 7 with rbf kernel:\r\npca = KernelPCA(n_components=7, kernel='rbf', copy_X=False, n_jobs=-1)\r\npca.fit_transform(X)\r\n\r\n#### Expected Results\r\nThe same result.\r\n\r\n#### Actual Results\r\nThe results are the same except for their signs:(\r\n[[-0.44457617 -0.18155886 -0.10873474 0.13548386 -0.1437174 -0.057469\t0.18124364]] \r\n\r\n[[ 0.44457617 0.18155886 0.10873474 -0.13548386 -0.1437174 -0.057469 -0.18124364]] \r\n\r\n[[-0.44457617 -0.18155886 0.10873474 0.13548386 0.1437174 0.057469 0.18124364]] \r\n\r\n#### Versions\r\n0.18.1\r\n\n", - "golden_patch": "diff --git a/sklearn/decomposition/kernel_pca.py b/sklearn/decomposition/kernel_pca.py\n--- a/sklearn/decomposition/kernel_pca.py\n+++ b/sklearn/decomposition/kernel_pca.py\n@@ -8,6 +8,7 @@\n from scipy.sparse.linalg import eigsh\n \n from ..utils import check_random_state\n+from ..utils.extmath import svd_flip\n from ..utils.validation import check_is_fitted, check_array\n from ..exceptions import NotFittedError\n from ..base import BaseEstimator, TransformerMixin, _UnstableOn32BitMixin\n@@ -210,6 +211,10 @@ def _fit_transform(self, K):\n maxiter=self.max_iter,\n v0=v0)\n \n+ # flip eigenvectors' sign to enforce deterministic output\n+ self.alphas_, _ = svd_flip(self.alphas_,\n+ np.empty_like(self.alphas_).T)\n+\n # sort eigenvectors in descending order\n indices = self.lambdas_.argsort()[::-1]\n self.lambdas_ = self.lambdas_[indices]\n", - "expected_spans": { - "sklearn/decomposition/kernel_pca.py": [ - "imports", - "KernelPCA._fit_transform" - ] - }, - "resolved_by": [ - { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sklearn/decomposition/kernel_pca.py\n+++ b/sklearn/decomposition/kernel_pca.py\n@@ -217,12 +217,17 @@\n \n # remove eigenvectors with a zero eigenvalue\n if self.remove_zero_eig or self.n_components is None:\n self.alphas_ = self.alphas_[:, self.lambdas_ > 0]\n self.lambdas_ = self.lambdas_[self.lambdas_ > 0]\n \n+ # ensure consistent 
sign of eigenvectors\n+ max_abs_cols = np.argmax(np.abs(self.alphas_), axis=0)\n+ signs = np.sign(self.alphas_[max_abs_cols, range(self.alphas_.shape[1])])\n+ self.alphas_ *= signs\n+\n return K\n \n def _fit_inverse_transform(self, X_transformed, X):\n if hasattr(X, \"tocsr\"):\n raise NotImplementedError(\"Inverse transform not implemented for \"\n \"sparse matrices!\")\n" - }, - { - "name": "20240523_aider", - "patch": "diff --git a/sklearn/decomposition/kernel_pca.py b/sklearn/decomposition/kernel_pca.py\nindex bb91d0cbb..7e477364d 100644\n--- a/sklearn/decomposition/kernel_pca.py\n+++ b/sklearn/decomposition/kernel_pca.py\n@@ -215,6 +215,10 @@ class KernelPCA(BaseEstimator, TransformerMixin, _UnstableOn32BitMixin):\n self.lambdas_ = self.lambdas_[indices]\n self.alphas_ = self.alphas_[:, indices]\n \n+ # make the first element of each eigenvector positive\n+ alpha_signs = np.sign(self.alphas_[0])\n+ self.alphas_ *= alpha_signs\n+\n # remove eigenvectors with a zero eigenvalue\n if self.remove_zero_eig or self.n_components is None:\n self.alphas_ = self.alphas_[:, self.lambdas_ > 0]\n" - }, - { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sklearn/decomposition/kernel_pca.py b/sklearn/decomposition/kernel_pca.py\nindex bb91d0cbb..44de1f8a9 100644\n--- a/sklearn/decomposition/kernel_pca.py\n+++ b/sklearn/decomposition/kernel_pca.py\n@@ -220,6 +220,11 @@ class KernelPCA(BaseEstimator, TransformerMixin, _UnstableOn32BitMixin):\n self.alphas_ = self.alphas_[:, self.lambdas_ > 0]\n self.lambdas_ = self.lambdas_[self.lambdas_ > 0]\n \n+ # enforce consistent sign for eigenvectors\n+ for i in range(self.alphas_.shape[1]):\n+ if self.alphas_[0, i] < 0:\n+ self.alphas_[:, i] *= -1\n+ \n return K\n \n def _fit_inverse_transform(self, X_transformed, X):\n" - }, - { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sklearn/decomposition/kernel_pca.py b/sklearn/decomposition/kernel_pca.py\nindex bb91d0cbb..d647139a1 100644\n--- a/sklearn/decomposition/kernel_pca.py\n+++ b/sklearn/decomposition/kernel_pca.py\n@@ -220,6 +220,11 @@ class KernelPCA(BaseEstimator, TransformerMixin, _UnstableOn32BitMixin):\n self.alphas_ = self.alphas_[:, self.lambdas_ > 0]\n self.lambdas_ = self.lambdas_[self.lambdas_ > 0]\n \n+ # Fix the sign of the eigenvectors\n+ for i in range(self.alphas_.shape[1]):\n+ max_abs_col_index = np.argmax(np.abs(self.alphas_[:, i]))\n+ if self.alphas_[max_abs_col_index, i] < 0:\n+ self.alphas_[:, i] = -self.alphas_[:, i]\n return K\n \n def _fit_inverse_transform(self, X_transformed, X):\n" - }, - { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sklearn/decomposition/kernel_pca.py b/sklearn/decomposition/kernel_pca.py\nindex bb91d0cbb..ebb278990 100644\n--- a/sklearn/decomposition/kernel_pca.py\n+++ b/sklearn/decomposition/kernel_pca.py\n@@ -277,6 +277,10 @@ class KernelPCA(BaseEstimator, TransformerMixin, _UnstableOn32BitMixin):\n \n X_transformed = self.alphas_ * np.sqrt(self.lambdas_)\n \n+ # Sign correction step\n+ signs = np.sign(X_transformed[0])\n+ X_transformed *= signs\n+\n if self.fit_inverse_transform:\n self._fit_inverse_transform(X_transformed, X)\n \n" }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sklearn/decomposition/kernel_pca.py b/sklearn/decomposition/kernel_pca.py\nindex bb91d0cbb..18cb38198 100644\n--- a/sklearn/decomposition/kernel_pca.py\n+++ b/sklearn/decomposition/kernel_pca.py\n@@ -214,7 +214,11 @@ class KernelPCA(BaseEstimator, TransformerMixin, _UnstableOn32BitMixin):\n indices = 
self.lambdas_.argsort()[::-1]\n self.lambdas_ = self.lambdas_[indices]\n self.alphas_ = self.alphas_[:, indices]\n-\n+ # Ensure the first non-zero element in each eigenvector is positive\n+ for i in range(self.alphas_.shape[1]):\n+ first_non_zero = np.nonzero(self.alphas_[:, i])[0][0]\n+ if self.alphas_[first_non_zero, i] < 0:\n+ self.alphas_[:, i] = -self.alphas_[:, i]\n # remove eigenvectors with a zero eigenvalue\n if self.remove_zero_eig or self.n_components is None:\n self.alphas_ = self.alphas_[:, self.lambdas_ > 0]\n" - } - ], - "alternative_spans": [ - { - "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "run_name": "20240829_Isoform", "spans": { - "sklearn/decomposition/kernel_pca.py": [ - "KernelPCA._fit_transform" + "requests/adapters.py": [ + "HTTPAdapter.send" ] } }, { - "run_name": "20240523_aider", + "run_name": "20240622_Lingma_Agent", "spans": { - "sklearn/decomposition/kernel_pca.py": [ - "KernelPCA._fit_transform" + "requests/adapters.py": [ + "HTTPAdapter.send" ] } }, { - "run_name": "20240527_marscode-agent-dev", + "run_name": "20241016_IBM-SWE-1.0", "spans": { - "sklearn/decomposition/kernel_pca.py": [ - "KernelPCA._fit_transform" + "requests/adapters.py": [ + "docstring", + "HTTPAdapter.send" ] } }, { - "run_name": "20240604_CodeR", + "run_name": "20240612_IBM_Research_Agent101", "spans": { - "sklearn/decomposition/kernel_pca.py": [ - "KernelPCA._fit_transform" + "requests/adapters.py": [ + "imports" ] } }, { - "run_name": "20240609_moatless_gpt4o", + "run_name": "20240621_autocoderover-v20240620", "spans": { - "sklearn/decomposition/kernel_pca.py": [ - "KernelPCA.fit_transform" + "requests/adapters.py": [ + "HTTPAdapter.send" ] } }, { - "run_name": "20240612_IBM_Research_Agent101", + "run_name": "20240402_sweagent_gpt4", "spans": { - "sklearn/decomposition/kernel_pca.py": [] + "requests/adapters.py": [ + "imports" + ] } } ] }, { - "instance_id": "sympy__sympy-16106", - "repo": "sympy/sympy", - "base_commit": "0e987498b00167fdd4a08a41c852a97cb70ce8f2", - "problem_statement": "mathml printer for IndexedBase required\nWriting an `Indexed` object to MathML fails with a `TypeError` exception: `TypeError: 'Indexed' object is not iterable`:\r\n\r\n```\r\nIn [340]: sympy.__version__\r\nOut[340]: '1.0.1.dev'\r\n\r\nIn [341]: from sympy.abc import (a, b)\r\n\r\nIn [342]: sympy.printing.mathml(sympy.IndexedBase(a)[b])\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n in ()\r\n----> 1 sympy.printing.mathml(sympy.IndexedBase(a)[b])\r\n\r\n/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/sympy/printing/mathml.py in mathml(expr, **settings)\r\n 442 def mathml(expr, **settings):\r\n 443 \"\"\"Returns the MathML representation of expr\"\"\"\r\n--> 444 return MathMLPrinter(settings).doprint(expr)\r\n 445 \r\n 446 \r\n\r\n/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/sympy/printing/mathml.py in doprint(self, expr)\r\n 36 Prints the expression as MathML.\r\n 37 \"\"\"\r\n---> 38 mathML = Printer._print(self, expr)\r\n 39 unistr = mathML.toxml()\r\n 40 xmlbstr = unistr.encode('ascii', 'xmlcharrefreplace')\r\n\r\n/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/sympy/printing/printer.py in _print(self, expr, *args, **kwargs)\r\n 255 printmethod = '_print_' + cls.__name__\r\n 256 if hasattr(self, printmethod):\r\n--> 257 return getattr(self, printmethod)(expr, *args, **kwargs)\r\n 258 # Unknown object, fall back to the emptyPrinter.\r\n 259 return 
self.emptyPrinter(expr)\r\n\r\n/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/sympy/printing/mathml.py in _print_Basic(self, e)\r\n 356 def _print_Basic(self, e):\r\n 357 x = self.dom.createElement(self.mathml_tag(e))\r\n--> 358 for arg in e:\r\n 359 x.appendChild(self._print(arg))\r\n 360 return x\r\n\r\nTypeError: 'Indexed' object is not iterable\r\n```\r\n\r\nIt also fails for more complex expressions where at least one element is Indexed.\n", - "golden_patch": "diff --git a/sympy/printing/mathml.py b/sympy/printing/mathml.py\n--- a/sympy/printing/mathml.py\n+++ b/sympy/printing/mathml.py\n@@ -1271,6 +1271,26 @@ def _print_Lambda(self, e):\n return x\n \n \n+ def _print_tuple(self, e):\n+ x = self.dom.createElement('mfenced')\n+ for i in e:\n+ x.appendChild(self._print(i))\n+ return x\n+\n+\n+ def _print_IndexedBase(self, e):\n+ return self._print(e.label)\n+\n+ def _print_Indexed(self, e):\n+ x = self.dom.createElement('msub')\n+ x.appendChild(self._print(e.base))\n+ if len(e.indices) == 1:\n+ x.appendChild(self._print(e.indices[0]))\n+ return x\n+ x.appendChild(self._print(e.indices))\n+ return x\n+\n+\n def mathml(expr, printer='content', **settings):\n \"\"\"Returns the MathML representation of expr. If printer is presentation then\n prints Presentation MathML else prints content MathML.\n", + "instance_id": "psf__requests-3362", + "repo": "psf/requests", + "base_commit": "36453b95b13079296776d11b09cab2567ea3e703", + "problem_statement": "Uncertain about content/text vs iter_content(decode_unicode=True/False)\nWhen requesting an application/json document, I'm seeing `next(r.iter_content(16*1024, decode_unicode=True))` returning bytes, whereas `r.text` returns unicode. My understanding was that both should return a unicode object. In essence, I thought \"iter_content\" was equivalent to \"iter_text\" when decode_unicode was True. Have I misunderstood something? 
I can provide an example if needed.\n\nFor reference, I'm using python 3.5.1 and requests 2.10.0.\n\nThanks!\n\n", + "golden_patch": "diff --git a/requests/utils.py b/requests/utils.py\n--- a/requests/utils.py\n+++ b/requests/utils.py\n@@ -358,13 +358,20 @@ def get_encoding_from_headers(headers):\n \n def stream_decode_response_unicode(iterator, r):\n \"\"\"Stream decodes a iterator.\"\"\"\n+ encoding = r.encoding\n \n- if r.encoding is None:\n- for item in iterator:\n- yield item\n- return\n+ if encoding is None:\n+ encoding = r.apparent_encoding\n+\n+ try:\n+ decoder = codecs.getincrementaldecoder(encoding)(errors='replace')\n+ except (LookupError, TypeError):\n+ # A LookupError is raised if the encoding was not found which could\n+ # indicate a misspelling or similar mistake.\n+ #\n+ # A TypeError can be raised if encoding is None\n+ raise UnicodeError(\"Unable to decode contents with encoding %s.\" % encoding)\n \n- decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')\n for chunk in iterator:\n rv = decoder.decode(chunk)\n if rv:\n", + "test_patch": "diff --git a/tests/test_requests.py b/tests/test_requests.py\n--- a/tests/test_requests.py\n+++ b/tests/test_requests.py\n@@ -980,6 +980,13 @@ def test_response_decode_unicode(self):\n chunks = r.iter_content(decode_unicode=True)\n assert all(isinstance(chunk, str) for chunk in chunks)\n \n+ # check for encoding value of None\n+ r = requests.Response()\n+ r.raw = io.BytesIO(b'the content')\n+ r.encoding = None\n+ chunks = r.iter_content(decode_unicode=True)\n+ assert all(isinstance(chunk, str) for chunk in chunks)\n+\n def test_response_chunk_size_int(self):\n \"\"\"Ensure that chunk_size is passed as an integer, otherwise\n raise a TypeError.\n", + "fail_to_pass": "[\"tests/test_requests.py::TestRequests::test_response_decode_unicode\"]", + "pass_to_pass": "[\"tests/test_requests.py::TestRequests::test_entry_points\", \"tests/test_requests.py::TestRequests::test_invalid_url[MissingSchema-hiwpefhipowhefopw]\", \"tests/test_requests.py::TestRequests::test_invalid_url[InvalidSchema-localhost:3128]\", \"tests/test_requests.py::TestRequests::test_invalid_url[InvalidSchema-localhost.localdomain:3128/]\", \"tests/test_requests.py::TestRequests::test_invalid_url[InvalidSchema-10.122.1.1:3128/]\", \"tests/test_requests.py::TestRequests::test_invalid_url[InvalidURL-http://]\", \"tests/test_requests.py::TestRequests::test_basic_building\", \"tests/test_requests.py::TestRequests::test_path_is_not_double_encoded\", \"tests/test_requests.py::TestRequests::test_params_are_added_before_fragment[http://example.com/path#fragment-http://example.com/path?a=b#fragment]\", \"tests/test_requests.py::TestRequests::test_params_are_added_before_fragment[http://example.com/path?key=value#fragment-http://example.com/path?key=value&a=b#fragment]\", \"tests/test_requests.py::TestRequests::test_params_original_order_is_preserved_by_default\", \"tests/test_requests.py::TestRequests::test_params_bytes_are_encoded\", \"tests/test_requests.py::TestRequests::test_binary_put\", \"tests/test_requests.py::TestRequests::test_errors[http://doesnotexist.google.com-ConnectionError]\", \"tests/test_requests.py::TestRequests::test_errors[http://localhost:1-ConnectionError]\", \"tests/test_requests.py::TestRequests::test_errors[http://fe80::5054:ff:fe5a:fc0-InvalidURL]\", \"tests/test_requests.py::TestRequests::test_proxy_error\", \"tests/test_requests.py::TestRequests::test_non_prepared_request_error\", 
\"tests/test_requests.py::TestRequests::test_prepare_request_with_bytestring_url\", \"tests/test_requests.py::TestRequests::test_links\", \"tests/test_requests.py::TestRequests::test_cookie_parameters\", \"tests/test_requests.py::TestRequests::test_cookie_as_dict_keeps_len\", \"tests/test_requests.py::TestRequests::test_cookie_as_dict_keeps_items\", \"tests/test_requests.py::TestRequests::test_cookie_as_dict_keys\", \"tests/test_requests.py::TestRequests::test_cookie_as_dict_values\", \"tests/test_requests.py::TestRequests::test_cookie_as_dict_items\", \"tests/test_requests.py::TestRequests::test_cookie_duplicate_names_different_domains\", \"tests/test_requests.py::TestRequests::test_cookie_duplicate_names_raises_cookie_conflict_error\", \"tests/test_requests.py::TestRequests::test_response_is_iterable\", \"tests/test_requests.py::TestRequests::test_response_chunk_size_int\", \"tests/test_requests.py::TestRequests::test_http_error\", \"tests/test_requests.py::TestRequests::test_transport_adapter_ordering\", \"tests/test_requests.py::TestRequests::test_long_authinfo_in_url\", \"tests/test_requests.py::TestRequests::test_nonhttp_schemes_dont_check_URLs\", \"tests/test_requests.py::TestRequests::test_basic_auth_str_is_always_native\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_init[cid0]\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_init[cid1]\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_init[cid2]\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_docstring_example\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_len\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_getitem\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_fixes_649\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_delitem\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_contains\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_get\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_update\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_update_retains_unchanged\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_iter\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_equality\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_setdefault\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_lower_items\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_preserve_key_case\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_preserve_last_key_case\", \"tests/test_requests.py::TestCaseInsensitiveDict::test_copy\", \"tests/test_requests.py::TestMorselToCookieExpires::test_expires_valid_str\", \"tests/test_requests.py::TestMorselToCookieExpires::test_expires_invalid_int[100-TypeError]\", \"tests/test_requests.py::TestMorselToCookieExpires::test_expires_invalid_int[woops-ValueError]\", \"tests/test_requests.py::TestMorselToCookieExpires::test_expires_none\", \"tests/test_requests.py::TestMorselToCookieMaxAge::test_max_age_valid_int\", \"tests/test_requests.py::TestMorselToCookieMaxAge::test_max_age_invalid_str\", \"tests/test_requests.py::TestTimeout::test_connect_timeout\", \"tests/test_requests.py::TestTimeout::test_total_timeout_connect\", \"tests/test_requests.py::test_json_encodes_as_bytes\", \"tests/test_requests.py::test_proxy_env_vars_override_default[http_proxy-http://example.com-socks5://proxy.com:9876]\", \"tests/test_requests.py::test_proxy_env_vars_override_default[https_proxy-https://example.com-socks5://proxy.com:9876]\", 
\"tests/test_requests.py::test_proxy_env_vars_override_default[all_proxy-http://example.com-socks5://proxy.com:9876]\", \"tests/test_requests.py::test_proxy_env_vars_override_default[all_proxy-https://example.com-socks5://proxy.com:9876]\", \"tests/test_requests.py::test_data_argument_accepts_tuples[data0]\", \"tests/test_requests.py::test_data_argument_accepts_tuples[data1]\", \"tests/test_requests.py::test_data_argument_accepts_tuples[data2]\", \"tests/test_requests.py::test_prepared_copy[None]\", \"tests/test_requests.py::test_prepared_copy[kwargs1]\", \"tests/test_requests.py::test_prepared_copy[kwargs2]\", \"tests/test_requests.py::test_prepared_copy[kwargs3]\", \"tests/test_requests.py::test_vendor_aliases\"]", "expected_spans": { - "sympy/printing/mathml.py": [ - "mathml" + "requests/utils.py": [ + "stream_decode_response_unicode" ] }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11039", - "repo": "django/django", - "base_commit": "d5276398046ce4a102776a1e67dcac2884d80dfe", - "problem_statement": "sqlmigrate wraps it's outpout in BEGIN/COMMIT even if the database doesn't support transactional DDL\nDescription\n\t \n\t\t(last modified by Simon Charette)\n\t \nThe migration executor only adds the outer BEGIN/COMMIT \u200bif the migration is atomic and \u200bthe schema editor can rollback DDL but the current sqlmigrate logic only takes migration.atomic into consideration.\nThe issue can be addressed by\nChanging sqlmigrate \u200bassignment of self.output_transaction to consider connection.features.can_rollback_ddl as well.\nAdding a test in tests/migrations/test_commands.py based on \u200ban existing test for non-atomic migrations that mocks connection.features.can_rollback_ddl to False instead of overdidding MIGRATION_MODULES to point to a non-atomic migration.\nI marked the ticket as easy picking because I included the above guidelines but feel free to uncheck it if you deem it inappropriate.\n", - "golden_patch": "diff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py\n--- a/django/core/management/commands/sqlmigrate.py\n+++ b/django/core/management/commands/sqlmigrate.py\n@@ -55,8 +55,9 @@ def handle(self, *args, **options):\n migration_name, app_label))\n targets = [(app_label, migration.name)]\n \n- # Show begin/end around output only for atomic migrations\n- self.output_transaction = migration.atomic\n+ # Show begin/end around output for atomic migrations, if the database\n+ # supports transactional DDL.\n+ self.output_transaction = migration.atomic and connection.features.can_rollback_ddl\n \n # Make a plan that represents just the requested migrations and show SQL\n # for it\n", - "expected_spans": { - "django/core/management/commands/sqlmigrate.py": [ - "Command.handle" + "test_file_spans": { + "tests/test_requests.py": [ + "TestRequests.test_response_chunk_size_int" ] }, "resolved_by": [ { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py\nindex f0b663222a..b51d1739a4 100644\n--- a/django/core/management/commands/sqlmigrate.py\n+++ b/django/core/management/commands/sqlmigrate.py\n@@ -56,7 +56,7 @@ class Command(BaseCommand):\n targets = [(app_label, migration.name)]\n \n # Show begin/end around output only for atomic migrations\n- self.output_transaction = migration.atomic\n+ self.output_transaction = migration.atomic and connection.features.can_rollback_ddl\n \n # Make a plan 
that represents just the requested migrations and show SQL\n # for it\n" + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" + ] + }, + "alternative_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py\nindex f0b663222a..5779810d09 100644\n--- a/django/core/management/commands/sqlmigrate.py\n+++ b/django/core/management/commands/sqlmigrate.py\n@@ -55,8 +55,8 @@ class Command(BaseCommand):\n migration_name, app_label))\n targets = [(app_label, migration.name)]\n \n- # Show begin/end around output only for atomic migrations\n- self.output_transaction = migration.atomic\n+ # Show begin/end around output only for atomic migrations that can rollback DDL\n+ self.output_transaction = migration.atomic and connection.features.can_rollback_ddl\n \n # Make a plan that represents just the requested migrations and show SQL\n # for it\n" + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ] + }, + "alternative_spans": {} }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/django/core/management/commands/sqlmigrate.py\n+++ b/django/core/management/commands/sqlmigrate.py\n@@ -55,8 +55,8 @@\n migration_name, app_label))\n targets = [(app_label, migration.name)]\n \n- # Show begin/end around output only for atomic migrations\n- self.output_transaction = migration.atomic\n+ # Show begin/end around output only for atomic migrations if the database supports transactional DDL\n+ self.output_transaction = migration.atomic and connection.features.can_rollback_ddl\n \n # Make a plan that represents just the requested migrations and show SQL\n # for it\n\n--- a/tests/migrations/test_commands.py\n+++ b/tests/migrations/test_commands.py\n@@ -603,6 +603,19 @@\n # Cleanup by unmigrating everything\n call_command(\"migrate\", \"migrations\", \"zero\", verbosity=0)\n \n+ @override_settings(MIGRATION_MODULES={\"migrations\": \"migrations.test_migrations_atomic\"})\n+ def test_sqlmigrate_no_transaction_for_non_rollback_ddl(self):\n+ \"\"\"\n+ Transaction wrappers aren't shown for atomic migrations when DDL can't be rolled back.\n+ \"\"\"\n+ from unittest.mock import patch\n+ with patch('django.db.connection.features.can_rollback_ddl', False):\n+ out = io.StringIO()\n+ call_command(\"sqlmigrate\", \"migrations\", \"0001\", stdout=out)\n+ output = out.getvalue().lower()\n+ self.assertNotIn(connection.ops.start_transaction_sql().lower(), output)\n+ self.assertNotIn(connection.ops.end_transaction_sql().lower(), output)\n+\n @override_settings(MIGRATION_MODULES={\"migrations\": \"migrations.test_migrations_non_atomic\"})\n def test_sqlmigrate_for_non_atomic_migration(self):\n \"\"\"\n" + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ] + }, + "alternative_spans": {} }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py\nindex f0b663222a..b51d1739a4 100644\n--- a/django/core/management/commands/sqlmigrate.py\n+++ b/django/core/management/commands/sqlmigrate.py\n@@ -56,7 +56,7 @@ class Command(BaseCommand):\n targets = [(app_label, migration.name)]\n \n # Show begin/end around output only for atomic migrations\n- self.output_transaction 
= migration.atomic\n+ self.output_transaction = migration.atomic and connection.features.can_rollback_ddl\n \n # Make a plan that represents just the requested migrations and show SQL\n # for it\n" + "name": "20240828_autose_mixed", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ] + }, + "alternative_spans": {} }, { "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py\nindex f0b663222a..b51d1739a4 100644\n--- a/django/core/management/commands/sqlmigrate.py\n+++ b/django/core/management/commands/sqlmigrate.py\n@@ -56,7 +56,7 @@ class Command(BaseCommand):\n targets = [(app_label, migration.name)]\n \n # Show begin/end around output only for atomic migrations\n- self.output_transaction = migration.atomic\n+ self.output_transaction = migration.atomic and connection.features.can_rollback_ddl\n \n # Make a plan that represents just the requested migrations and show SQL\n # for it\n" + "updated_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" + ] + }, + "alternative_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py\nindex f0b663222a..1f122613a7 100644\n--- a/django/core/management/commands/sqlmigrate.py\n+++ b/django/core/management/commands/sqlmigrate.py\n@@ -8,7 +8,7 @@ from django.db.migrations.loader import AmbiguityError\n class Command(BaseCommand):\n help = \"Prints the SQL statements for the named migration.\"\n \n- output_transaction = True\n+ output_transaction = True # This line will be replaced with the new condition\n \n def add_arguments(self, parser):\n parser.add_argument('app_label', help='App label of the application containing the migration.')\n@@ -56,7 +56,7 @@ class Command(BaseCommand):\n targets = [(app_label, migration.name)]\n \n # Show begin/end around output only for atomic migrations\n- self.output_transaction = migration.atomic\n+ self.output_transaction = migration.atomic and connection.features.can_rollback_ddl\n \n # Make a plan that represents just the requested migrations and show SQL\n # for it\n" + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py\nindex f0b663222a..b51d1739a4 100644\n--- a/django/core/management/commands/sqlmigrate.py\n+++ b/django/core/management/commands/sqlmigrate.py\n@@ -56,7 +56,7 @@ class Command(BaseCommand):\n targets = [(app_label, migration.name)]\n \n # Show begin/end around output only for atomic migrations\n- self.output_transaction = migration.atomic\n+ self.output_transaction = migration.atomic and connection.features.can_rollback_ddl\n \n # Make a plan that represents just the requested migrations and show SQL\n # for it\ndiff --git a/tests/migrations/test_commands.py b/tests/migrations/test_commands.py\nindex 0e8fa9fdd1..9509db1ba6 100644\n--- a/tests/migrations/test_commands.py\n+++ b/tests/migrations/test_commands.py\n@@ -528,7 +528,18 @@ class MigrateTests(MigrationTestBase):\n out = io.StringIO()\n call_command(\"sqlmigrate\", \"migrations\", \"0001\", stdout=out)\n output = out.getvalue().lower()\n-\n+ @override_settings(MIGRATION_MODULES={\"migrations\": 
\"migrations.test_migrations\"})\n+ def test_sqlmigrate_no_transactional_ddl_support(self):\n+ \"\"\"\n+ sqlmigrate does not output BEGIN/COMMIT statements if the database\n+ does not support transactional DDL.\n+ \"\"\"\n+ out = io.StringIO()\n+ with mock.patch('django.db.connection.features.can_rollback_ddl', False):\n+ call_command(\"sqlmigrate\", \"migrations\", \"0001\", stdout=out)\n+ output = out.getvalue().lower()\n+ self.assertNotIn('begin;', output)\n+ self.assertNotIn('commit;', output)\n index_tx_start = output.find(connection.ops.start_transaction_sql().lower())\n index_op_desc_author = output.find('-- create model author')\n index_create_table = output.find('create table')\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11049", - "repo": "django/django", - "base_commit": "17455e924e243e7a55e8a38f45966d8cbb27c273", - "problem_statement": "Correct expected format in invalid DurationField error message\nDescription\n\t\nIf you enter a duration \"14:00\" into a duration field, it translates to \"00:14:00\" which is 14 minutes.\nThe current error message for invalid DurationField says that this should be the format of durations: \"[DD] [HH:[MM:]]ss[.uuuuuu]\". But according to the actual behaviour, it should be: \"[DD] [[HH:]MM:]ss[.uuuuuu]\", because seconds are mandatory, minutes are optional, and hours are optional if minutes are provided.\nThis seems to be a mistake in all Django versions that support the DurationField.\nAlso the duration fields could have a default help_text with the requested format, because the syntax is not self-explanatory.\n", - "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -1587,7 +1587,7 @@ class DurationField(Field):\n empty_strings_allowed = False\n default_error_messages = {\n 'invalid': _(\"'%(value)s' value has an invalid format. It must be in \"\n- \"[DD] [HH:[MM:]]ss[.uuuuuu] format.\")\n+ \"[DD] [[HH:]MM:]ss[.uuuuuu] format.\")\n }\n description = _(\"Duration\")\n \n", - "expected_spans": { - "django/db/models/fields/__init__.py": [ - "DurationField" - ] - }, - "resolved_by": [ - { - "name": "20240523_aider", - "patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\nindex f32ce64629..f09bf7f275 100644\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -907,6 +907,7 @@ class AutoField(Field):\n \n def __init__(self, *args, **kwargs):\n kwargs['blank'] = True\n+ kwargs.setdefault('help_text', 'Enter a duration in the format [DD] [[HH:]MM:]ss[.uuuuuu]')\n super().__init__(*args, **kwargs)\n \n def check(self, **kwargs):\n@@ -1587,7 +1588,7 @@ class DurationField(Field):\n empty_strings_allowed = False\n default_error_messages = {\n 'invalid': _(\"'%(value)s' value has an invalid format. It must be in \"\n- \"[DD] [HH:[MM:]]ss[.uuuuuu] format.\")\n+ \"[DD] [[HH:]MM:]ss[.uuuuuu] format.\")\n }\n description = _(\"Duration\")\n \n" + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content", + "Response.text" + ] + }, + "alternative_spans": {} }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -1587,8 +1587,9 @@\n empty_strings_allowed = False\n default_error_messages = {\n 'invalid': _(\"'%(value)s' value has an invalid format. 
It must be in \"\n- \"[DD] [HH:[MM:]]ss[.uuuuuu] format.\")\n+ \"[DD] [[HH:]MM:]ss[.uuuuuu] format.\")\n }\n+ help_text = _(\"Enter duration in the format: [DD] [[HH:]MM:]ss[.uuuuuu]. Days and hours are optional.\")\n description = _(\"Duration\")\n \n def get_internal_type(self):\n" + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "requests/models.py": [ + "docstring", + "imports", + "RequestEncodingMixin.path_url", + "RequestEncodingMixin._encode_params", + "RequestEncodingMixin._encode_files", + "RequestHooksMixin.register_hook", + "RequestHooksMixin.deregister_hook", + "Request.__init__", + "Request.__repr__", + "Request.prepare", + "PreparedRequest.__init__", + "PreparedRequest.prepare", + "PreparedRequest.__repr__", + "PreparedRequest.prepare_url", + "PreparedRequest.prepare_headers", + "PreparedRequest.prepare_body", + "PreparedRequest.prepare_content_length", + "PreparedRequest.prepare_auth", + "PreparedRequest.prepare_cookies", + "PreparedRequest.prepare_hooks", + "Response", + "Response.__init__", + "Response.__getstate__", + "Response.__setstate__", + "Response.__repr__", + "Response.is_redirect", + "Response.is_permanent_redirect", + "Response.iter_content", + "Response.iter_lines", + "Response.content", + "Response.text", + "Response.json", + "Response.links", + "Response.raise_for_status", + "Response.close" + ] + }, + "alternative_spans": {} }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\nindex f32ce64629..e2de88bbb8 100644\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -1587,7 +1587,7 @@ class DurationField(Field):\n empty_strings_allowed = False\n default_error_messages = {\n 'invalid': _(\"'%(value)s' value has an invalid format. It must be in \"\n- \"[DD] [HH:[MM:]]ss[.uuuuuu] format.\")\n+ \"[DD] [[HH:]MM:]ss[.uuuuuu] format.\")\n }\n description = _(\"Duration\")\n \n@@ -1631,10 +1631,12 @@ class DurationField(Field):\n return '' if val is None else duration_string(val)\n \n def formfield(self, **kwargs):\n- return super().formfield(**{\n- 'form_class': forms.DurationField,\n- **kwargs,\n- })\n+ defaults = {'help_text': _(\"Enter duration in the format: [DD] [[HH:]MM:]ss[.uuuuuu]\")}\n+ defaults.update(kwargs)\n+ return super().formfield(**{\n+ 'form_class': forms.DurationField,\n+ **defaults,\n+ })\n \n \n class EmailField(CharField):\n" + "name": "20240820_honeycomb", + "updated_spans": { + "requests/models.py": [ + "docstring", + "Response.__repr__", + "Response.iter_content" + ] + }, + "alternative_spans": {} }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\nindex f32ce64629..71005490f3 100644\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -1587,7 +1587,7 @@ class DurationField(Field):\n empty_strings_allowed = False\n default_error_messages = {\n 'invalid': _(\"'%(value)s' value has an invalid format. 
It must be in \"\n- \"[DD] [HH:[MM:]]ss[.uuuuuu] format.\")\n+ \"[DD] [[HH:]MM:]ss[.uuuuuu] format.\")\n }\n description = _(\"Duration\")\n \n" + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ] + }, + "alternative_spans": {} }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\nindex f32ce64629..6b9e5a7c1e 100644\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -1587,9 +1587,10 @@ class DurationField(Field):\n empty_strings_allowed = False\n default_error_messages = {\n 'invalid': _(\"'%(value)s' value has an invalid format. It must be in \"\n- \"[DD] [HH:[MM:]]ss[.uuuuuu] format.\")\n+ \"[DD] [[HH:]MM:]ss[.uuuuuu] format.\")\n }\n description = _(\"Duration\")\n+ default_help_text = _(\"[DD] [[HH:]MM:]ss[.uuuuuu] format\")\n \n def get_internal_type(self):\n return \"DurationField\"\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-13439", - "repo": "scikit-learn/scikit-learn", - "base_commit": "a62775e99f2a5ea3d51db7160fad783f6cd8a4c5", - "problem_statement": "Pipeline should implement __len__\n#### Description\r\n\r\nWith the new indexing support `pipe[:len(pipe)]` raises an error.\r\n\r\n#### Steps/Code to Reproduce\r\n\r\n```python\r\nfrom sklearn import svm\r\nfrom sklearn.datasets import samples_generator\r\nfrom sklearn.feature_selection import SelectKBest\r\nfrom sklearn.feature_selection import f_regression\r\nfrom sklearn.pipeline import Pipeline\r\n\r\n# generate some data to play with\r\nX, y = samples_generator.make_classification(\r\n n_informative=5, n_redundant=0, random_state=42)\r\n\r\nanova_filter = SelectKBest(f_regression, k=5)\r\nclf = svm.SVC(kernel='linear')\r\npipe = Pipeline([('anova', anova_filter), ('svc', clf)])\r\n\r\nlen(pipe)\r\n```\r\n\r\n#### Versions\r\n\r\n```\r\nSystem:\r\n python: 3.6.7 | packaged by conda-forge | (default, Feb 19 2019, 18:37:23) [GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)]\r\nexecutable: /Users/krisz/.conda/envs/arrow36/bin/python\r\n machine: Darwin-18.2.0-x86_64-i386-64bit\r\n\r\nBLAS:\r\n macros: HAVE_CBLAS=None\r\n lib_dirs: /Users/krisz/.conda/envs/arrow36/lib\r\ncblas_libs: openblas, openblas\r\n\r\nPython deps:\r\n pip: 19.0.3\r\nsetuptools: 40.8.0\r\n sklearn: 0.21.dev0\r\n numpy: 1.16.2\r\n scipy: 1.2.1\r\n Cython: 0.29.6\r\n pandas: 0.24.1\r\n```\n", - "golden_patch": "diff --git a/sklearn/pipeline.py b/sklearn/pipeline.py\n--- a/sklearn/pipeline.py\n+++ b/sklearn/pipeline.py\n@@ -199,6 +199,12 @@ def _iter(self, with_final=True):\n if trans is not None and trans != 'passthrough':\n yield idx, name, trans\n \n+ def __len__(self):\n+ \"\"\"\n+ Returns the length of the Pipeline\n+ \"\"\"\n+ return len(self.steps)\n+\n def __getitem__(self, ind):\n \"\"\"Returns a sub-pipeline or a single esimtator in the pipeline\n \n", - "expected_spans": { - "sklearn/pipeline.py": [ - "Pipeline.__getitem__" - ] - }, - "resolved_by": [ - { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sklearn/pipeline.py\n+++ b/sklearn/pipeline.py\n@@ -128,12 +128,16 @@\n \n def __init__(self, steps, memory=None):\n self.steps = steps\n self._validate_steps()\n self.memory = memory\n \n+ def __len__(self):\n+ \"\"\"Returns the length of the Pipeline.\"\"\"\n+ return len(self.steps)\n+\n def get_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n \n Parameters\n 
----------\n deep : boolean, optional\n" + "name": "20240908_infant_gpt4o", + "updated_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" + ] + }, + "alternative_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/sklearn/pipeline.py b/sklearn/pipeline.py\nindex 7eaf9a46f..5efce3e13 100644\n--- a/sklearn/pipeline.py\n+++ b/sklearn/pipeline.py\n@@ -219,6 +219,10 @@ class Pipeline(_BaseComposition):\n return self.named_steps[ind]\n return est\n \n+ def __len__(self):\n+ \"\"\"Return the number of steps in the pipeline.\"\"\"\n+ return len(self.steps)\n+\n @property\n def _estimator_type(self):\n return self.steps[-1][1]._estimator_type\n" + "name": "20240829_Isoform", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ] + }, + "alternative_spans": {} }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sklearn/pipeline.py\n+++ b/sklearn/pipeline.py\n@@ -199,6 +199,10 @@\n if trans is not None and trans != 'passthrough':\n yield idx, name, trans\n \n+ def __len__(self):\n+ \"\"\"Return the number of steps in the pipeline.\"\"\"\n+ return len(self.steps)\n+\n def __getitem__(self, ind):\n \"\"\"Returns a sub-pipeline or a single esimtator in the pipeline\n \n" + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ] + }, + "alternative_spans": {} }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sklearn/pipeline.py b/sklearn/pipeline.py\nindex 7eaf9a46f..9d254070a 100644\n--- a/sklearn/pipeline.py\n+++ b/sklearn/pipeline.py\n@@ -27,6 +27,10 @@ __all__ = ['Pipeline', 'FeatureUnion', 'make_pipeline', 'make_union']\n \n \n class Pipeline(_BaseComposition):\n+\n+ def __len__(self):\n+ \"\"\"Return the number of steps in the pipeline.\"\"\"\n+ return len(self.steps)\n \"\"\"Pipeline of transforms with a final estimator.\n \n Sequentially apply a list of transforms and a final estimator.\n" + "name": "20240622_Lingma_Agent", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ], + "requests/utils.py": [ + "stream_decode_response_unicode" + ] + }, + "alternative_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sklearn/pipeline.py b/sklearn/pipeline.py\nindex 7eaf9a46f..12a0131bc 100644\n--- a/sklearn/pipeline.py\n+++ b/sklearn/pipeline.py\n@@ -561,6 +561,9 @@ class Pipeline(_BaseComposition):\n # check if first estimator expects pairwise input\n return getattr(self.steps[0][1], '_pairwise', False)\n \n+ def __len__(self):\n+ \"\"\"Return the number of steps in the pipeline.\"\"\"\n+ return len(self.steps)\n \n def _name_estimators(estimators):\n \"\"\"Generate names for estimators.\"\"\"\n" - }, - { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sklearn/pipeline.py b/sklearn/pipeline.py\nindex 7eaf9a46f..bc42f5c43 100644\n--- a/sklearn/pipeline.py\n+++ b/sklearn/pipeline.py\n@@ -223,6 +223,9 @@ class Pipeline(_BaseComposition):\n def _estimator_type(self):\n return self.steps[-1][1]._estimator_type\n \n+ def __len__(self):\n+ return len(self.steps)\n+\n @property\n def named_steps(self):\n # Use Bunch object to improve autocomplete\n" + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ] + }, + "alternative_spans": {} }, { "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git 
a/sklearn/pipeline.py b/sklearn/pipeline.py\nindex 7eaf9a46f..16307f263 100644\n--- a/sklearn/pipeline.py\n+++ b/sklearn/pipeline.py\n@@ -130,6 +130,9 @@ class Pipeline(_BaseComposition):\n self.steps = steps\n self._validate_steps()\n self.memory = memory\n+ def __len__(self):\n+ \"\"\"Return the number of steps in the pipeline.\"\"\"\n+ return len(self.steps)\n \n def get_params(self, deep=True):\n \"\"\"Get parameters for this estimator.\n" - } - ], - "alternative_spans": [ - { - "run_name": "20240509_amazon-q-developer-agent-20240430-dev", - "spans": { - "sklearn/pipeline.py": [ - "Pipeline", - "Pipeline.get_params" + "updated_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" + ], + "tests/conftest.py": [ + "httpbin" + ], + "tests/test_utils.py": [ + "TestSuperLen.test_io_streams" + ] + }, + "alternative_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" ] } }, { - "run_name": "20240523_aider", - "spans": { - "sklearn/pipeline.py": [ - "Pipeline._estimator_type" + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" ] - } + }, + "alternative_spans": {} }, { - "run_name": "20240527_marscode-agent-dev", - "spans": { - "sklearn/pipeline.py": [ - "Pipeline" + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" ] - } + }, + "alternative_spans": {} }, { - "run_name": "20240604_CodeR", - "spans": { - "sklearn/pipeline.py": [] + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "requests/models.py": [ + "imports", + "Response.iter_content" + ], + "requests/utils.py": [ + "stream_decode_response_unicode" + ] + }, + "alternative_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode" + ] } }, { - "run_name": "20240609_moatless_gpt4o", - "spans": { - "sklearn/pipeline.py": [ - "Pipeline.named_steps" + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode", + "iter_slices" + ] + }, + "alternative_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode", + "iter_slices" ] } }, { - "run_name": "20240612_IBM_Research_Agent101", - "spans": { - "sklearn/pipeline.py": [] + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "requests/models.py": [ + "Response.iter_content" + ], + "tests/test_requests.py": [ + "TestRequests.test_response_chunk_size_int" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode", + "guess_json_utf", + "prepend_scheme_if_needed" + ] + }, + "alternative_spans": { + "requests/utils.py": [ + "stream_decode_response_unicode", + "guess_json_utf", + "prepend_scheme_if_needed" + ] } } - ] + ], + "alternative_spans": [] }, { - "instance_id": "sympy__sympy-16281", + "instance_id": "sympy__sympy-11400", "repo": "sympy/sympy", - "base_commit": "41490b75f3621408e0468b0e7b6dc409601fc6ff", - "problem_statement": "Product pretty print could be improved\nThis is what the pretty printing for `Product` looks like:\r\n\r\n```\r\n>>> pprint(Product(1, (n, 1, oo)))\r\n \u221e\r\n\u252c\u2500\u2500\u2500\u252c\r\n\u2502 \u2502 1\r\n\u2502 \u2502\r\nn = 1\r\n>>> pprint(Product(1/n, (n, 1, oo)))\r\n \u221e\r\n\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u252c\r\n\u2502 \u2502 1\r\n\u2502 \u2502 \u2500\r\n\u2502 \u2502 n\r\n\u2502 \u2502\r\n n = 1\r\n>>> pprint(Product(1/n**2, (n, 1, 
oo)))\r\n \u221e\r\n\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\r\n\u2502 \u2502 1\r\n\u2502 \u2502 \u2500\u2500\r\n\u2502 \u2502 2\r\n\u2502 \u2502 n\r\n\u2502 \u2502\r\n n = 1\r\n>>> pprint(Product(1, (n, 1, oo)), use_unicode=False)\r\n oo\r\n_____\r\n| | 1\r\n| |\r\nn = 1\r\n>>> pprint(Product(1/n, (n, 1, oo)), use_unicode=False)\r\n oo\r\n________\r\n| | 1\r\n| | -\r\n| | n\r\n| |\r\n n = 1\r\n>>> pprint(Product(1/n**2, (n, 1, oo)), use_unicode=False)\r\n oo\r\n__________\r\n| | 1\r\n| | --\r\n| | 2\r\n| | n\r\n| |\r\n n = 1\r\n```\r\n\r\n(if those don't look good in your browser copy paste them into the terminal)\r\n\r\nThis could be improved:\r\n\r\n- Why is there always an empty line at the bottom of the \u220f? Keeping everything below the horizontal line is good, but the bottom looks asymmetric, and it makes the \u220f bigger than it needs to be.\r\n\r\n- The \u220f is too fat IMO. \r\n\r\n- It might look better if we extended the top bar. I'm unsure about this. \r\n\r\nCompare this\r\n\r\n```\r\n \u221e\r\n\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u252c\u2500\r\n \u2502 \u2502 1\r\n \u2502 \u2502 \u2500\u2500\r\n \u2502 \u2502 2\r\n \u2502 \u2502 n\r\n n = 1\r\n```\r\n\r\nThat's still almost twice as wide as the equivalent Sum, but if you make it much skinnier it starts to look bad.\r\n\r\n```\r\n \u221e\r\n ____\r\n \u2572\r\n \u2572 1\r\n \u2572 \u2500\u2500\r\n \u2571 2\r\n \u2571 n\r\n \u2571\r\n \u203e\u203e\u203e\u203e\r\nn = 1\r\n```\n", - "golden_patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -491,10 +491,9 @@ def _print_Product(self, expr):\n \n for lim in expr.limits:\n width = (func_height + 2) * 5 // 3 - 2\n- sign_lines = []\n- sign_lines.append(corner_chr + (horizontal_chr*width) + corner_chr)\n- for i in range(func_height + 1):\n- sign_lines.append(vertical_chr + (' '*width) + vertical_chr)\n+ sign_lines = [horizontal_chr + corner_chr + (horizontal_chr * (width-2)) + corner_chr + horizontal_chr]\n+ for _ in range(func_height + 1):\n+ sign_lines.append(' ' + vertical_chr + (' ' * (width-2)) + vertical_chr + ' ')\n \n pretty_sign = stringPict('')\n pretty_sign = prettyForm(*pretty_sign.stack(*sign_lines))\n", + "base_commit": "8dcb12a6cf500e8738d6729ab954a261758f49ca", + "problem_statement": "ccode(sinc(x)) doesn't work\n```\nIn [30]: ccode(sinc(x))\nOut[30]: '// Not supported in C:\\n// sinc\\nsinc(x)'\n```\n\nI don't think `math.h` has `sinc`, but it could print\n\n```\nIn [38]: ccode(Piecewise((sin(theta)/theta, Ne(theta, 0)), (1, True)))\nOut[38]: '((Ne(theta, 0)) ? 
(\\n sin(theta)/theta\\n)\\n: (\\n 1\\n))'\n```\n\n", + "golden_patch": "diff --git a/sympy/printing/ccode.py b/sympy/printing/ccode.py\n--- a/sympy/printing/ccode.py\n+++ b/sympy/printing/ccode.py\n@@ -231,6 +231,20 @@ def _print_Symbol(self, expr):\n else:\n return name\n \n+ def _print_Relational(self, expr):\n+ lhs_code = self._print(expr.lhs)\n+ rhs_code = self._print(expr.rhs)\n+ op = expr.rel_op\n+ return (\"{0} {1} {2}\").format(lhs_code, op, rhs_code)\n+\n+ def _print_sinc(self, expr):\n+ from sympy.functions.elementary.trigonometric import sin\n+ from sympy.core.relational import Ne\n+ from sympy.functions import Piecewise\n+ _piecewise = Piecewise(\n+ (sin(expr.args[0]) / expr.args[0], Ne(expr.args[0], 0)), (1, True))\n+ return self._print(_piecewise)\n+\n def _print_AugmentedAssignment(self, expr):\n lhs_code = self._print(expr.lhs)\n op = expr.rel_op\n", + "test_patch": "diff --git a/sympy/printing/tests/test_ccode.py b/sympy/printing/tests/test_ccode.py\n--- a/sympy/printing/tests/test_ccode.py\n+++ b/sympy/printing/tests/test_ccode.py\n@@ -120,6 +120,16 @@ def test_ccode_boolean():\n assert ccode((x | y) & z) == \"z && (x || y)\"\n \n \n+def test_ccode_Relational():\n+ from sympy import Eq, Ne, Le, Lt, Gt, Ge\n+ assert ccode(Eq(x, y)) == \"x == y\"\n+ assert ccode(Ne(x, y)) == \"x != y\"\n+ assert ccode(Le(x, y)) == \"x <= y\"\n+ assert ccode(Lt(x, y)) == \"x < y\"\n+ assert ccode(Gt(x, y)) == \"x > y\"\n+ assert ccode(Ge(x, y)) == \"x >= y\"\n+\n+\n def test_ccode_Piecewise():\n expr = Piecewise((x, x < 1), (x**2, True))\n assert ccode(expr) == (\n@@ -162,6 +172,18 @@ def test_ccode_Piecewise():\n raises(ValueError, lambda: ccode(expr))\n \n \n+def test_ccode_sinc():\n+ from sympy import sinc\n+ expr = sinc(x)\n+ assert ccode(expr) == (\n+ \"((x != 0) ? 
(\\n\"\n+ \" sin(x)/x\\n\"\n+ \")\\n\"\n+ \": (\\n\"\n+ \" 1\\n\"\n+ \"))\")\n+\n+\n def test_ccode_Piecewise_deep():\n p = ccode(2*Piecewise((x, x < 1), (x + 1, x < 2), (x**2, True)))\n assert p == (\n", + "fail_to_pass": "[\"test_ccode_Relational\", \"test_ccode_sinc\"]", + "pass_to_pass": "[\"test_printmethod\", \"test_ccode_sqrt\", \"test_ccode_Pow\", \"test_ccode_constants_mathh\", \"test_ccode_constants_other\", \"test_ccode_Rational\", \"test_ccode_Integer\", \"test_ccode_functions\", \"test_ccode_inline_function\", \"test_ccode_exceptions\", \"test_ccode_user_functions\", \"test_ccode_boolean\", \"test_ccode_Piecewise\", \"test_ccode_Piecewise_deep\", \"test_ccode_ITE\", \"test_ccode_settings\", \"test_ccode_Indexed\", \"test_ccode_Indexed_without_looking_for_contraction\", \"test_ccode_loops_matrix_vector\", \"test_dummy_loops\", \"test_ccode_loops_add\", \"test_ccode_loops_multiple_contractions\", \"test_ccode_loops_addfactor\", \"test_ccode_loops_multiple_terms\", \"test_dereference_printing\", \"test_Matrix_printing\", \"test_ccode_reserved_words\", \"test_ccode_sign\", \"test_ccode_Assignment\"]", "expected_spans": { - "sympy/printing/pretty/pretty.py": [ - "PrettyPrinter._print_Product" + "sympy/printing/ccode.py": [ + "CCodePrinter._print_AugmentedAssignment" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_ccode.py": [ + "test_ccode_Piecewise", + "test_ccode_Piecewise_deep" ] }, "resolved_by": [], "alternative_spans": [] }, { - "instance_id": "django__django-11099", - "repo": "django/django", - "base_commit": "d26b2424437dabeeca94d7900b37d2df4410da0c", - "problem_statement": "UsernameValidator allows trailing newline in usernames\nDescription\n\t\nASCIIUsernameValidator and UnicodeUsernameValidator use the regex \nr'^[\\w.@+-]+$'\nThe intent is to only allow alphanumeric characters as well as ., @, +, and -. However, a little known quirk of Python regexes is that $ will also match a trailing newline. Therefore, the user name validators will accept usernames which end with a newline. You can avoid this behavior by instead using \\A and \\Z to terminate regexes. For example, the validator regex could be changed to\nr'\\A[\\w.@+-]+\\Z'\nin order to reject usernames that end with a newline.\nI am not sure how to officially post a patch, but the required change is trivial - using the regex above in the two validators in contrib.auth.validators.\n", - "golden_patch": "diff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'^[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'^[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n", + "instance_id": "sympy__sympy-11870", + "repo": "sympy/sympy", + "base_commit": "5c2e1f96a7ff562d4a778f4ca9ffc9c81557197e", + "problem_statement": "simplifying exponential -> trig identities\n```\r\nf = 1 / 2 * (-I*exp(I*k) + I*exp(-I*k))\r\ntrigsimp(f)\r\n```\r\n\r\nIdeally, this would yield `sin(k)`. 
Is there a way to do this?\r\n\r\nAs a corollary, it would be awesome if \r\n\r\n```\r\nf = 1 / 2 / k* (-I*exp(I*k) + I*exp(-I*k))\r\ntrigsimp(f)\r\n```\r\n\r\ncould yield `sinc(k)`. Thank you for your consideration!\n", + "golden_patch": "diff --git a/sympy/functions/elementary/trigonometric.py b/sympy/functions/elementary/trigonometric.py\n--- a/sympy/functions/elementary/trigonometric.py\n+++ b/sympy/functions/elementary/trigonometric.py\n@@ -16,6 +16,8 @@\n from sympy.sets.sets import FiniteSet\n from sympy.utilities.iterables import numbered_symbols\n from sympy.core.compatibility import range\n+from sympy.core.relational import Ne\n+from sympy.functions.elementary.piecewise import Piecewise\n \n ###############################################################################\n ########################## TRIGONOMETRIC FUNCTIONS ############################\n@@ -400,6 +402,9 @@ def _eval_rewrite_as_csc(self, arg):\n def _eval_rewrite_as_sec(self, arg):\n return 1 / sec(arg - S.Pi / 2, evaluate=False)\n \n+ def _eval_rewrite_as_sinc(self, arg):\n+ return arg*sinc(arg)\n+\n def _eval_conjugate(self):\n return self.func(self.args[0].conjugate())\n \n@@ -1789,7 +1794,7 @@ def _eval_rewrite_as_jn(self, arg):\n return jn(0, arg)\n \n def _eval_rewrite_as_sin(self, arg):\n- return sin(arg) / arg\n+ return Piecewise((sin(arg)/arg, Ne(arg, 0)), (1, True))\n \n \n ###############################################################################\n", + "test_patch": "diff --git a/sympy/functions/elementary/tests/test_trigonometric.py b/sympy/functions/elementary/tests/test_trigonometric.py\n--- a/sympy/functions/elementary/tests/test_trigonometric.py\n+++ b/sympy/functions/elementary/tests/test_trigonometric.py\n@@ -6,6 +6,8 @@\n AccumBounds)\n from sympy.core.compatibility import range\n from sympy.utilities.pytest import XFAIL, slow, raises\n+from sympy.core.relational import Ne, Eq\n+from sympy.functions.elementary.piecewise import Piecewise\n \n x, y, z = symbols('x y z')\n r = Symbol('r', real=True)\n@@ -704,7 +706,7 @@ def test_sinc():\n assert sinc(x).series() == 1 - x**2/6 + x**4/120 + O(x**6)\n \n assert sinc(x).rewrite(jn) == jn(0, x)\n- assert sinc(x).rewrite(sin) == sin(x) / x\n+ assert sinc(x).rewrite(sin) == Piecewise((sin(x)/x, Ne(x, 0)), (1, True))\n \n \n def test_asin():\n@@ -1507,6 +1509,14 @@ def test_trig_period():\n assert tan(3*x).period(y) == S.Zero\n raises(NotImplementedError, lambda: sin(x**2).period(x))\n \n+\n def test_issue_7171():\n assert sin(x).rewrite(sqrt) == sin(x)\n assert sin(x).rewrite(pow) == sin(x)\n+\n+\n+def test_issue_11864():\n+ w, k = symbols('w, k', real=True)\n+ F = Piecewise((1, Eq(2*pi*k, 0)), (sin(pi*k)/(pi*k), True))\n+ soln = Piecewise((1, Eq(2*pi*k, 0)), (sinc(pi*k), True))\n+ assert F.rewrite(sinc) == soln\n", + "fail_to_pass": "[\"test_sinc\"]", + "pass_to_pass": "[\"test_sin\", \"test_sin_cos\", \"test_sin_series\", \"test_sin_rewrite\", \"test_sin_expansion\", \"test_sin_AccumBounds\", \"test_trig_symmetry\", \"test_cos\", \"test_issue_6190\", \"test_cos_series\", \"test_cos_rewrite\", \"test_cos_expansion\", \"test_cos_AccumBounds\", \"test_tan\", \"test_tan_series\", \"test_tan_rewrite\", \"test_tan_subs\", \"test_tan_expansion\", \"test_tan_AccumBounds\", \"test_cot\", \"test_cot_series\", \"test_cot_rewrite\", \"test_cot_subs\", \"test_cot_expansion\", \"test_cot_AccumBounds\", \"test_asin\", \"test_asin_series\", \"test_asin_rewrite\", \"test_acos\", \"test_acos_series\", \"test_acos_rewrite\", \"test_atan\", \"test_atan_rewrite\", 
\"test_atan2\", \"test_acot\", \"test_acot_rewrite\", \"test_attributes\", \"test_sincos_rewrite\", \"test_evenodd_rewrite\", \"test_issue_4547\", \"test_as_leading_term_issue_5272\", \"test_leading_terms\", \"test_atan2_expansion\", \"test_aseries\", \"test_issue_4420\", \"test_inverses\", \"test_real_imag\", \"test_sec\", \"test_sec_rewrite\", \"test_csc\", \"test_asec\", \"test_asec_is_real\", \"test_acsc\", \"test_csc_rewrite\", \"test_issue_8653\", \"test_issue_9157\", \"test_trig_period\", \"test_issue_7171\"]", "expected_spans": { - "django/contrib/auth/validators.py": [ - "ASCIIUsernameValidator", - "UnicodeUsernameValidator" + "sympy/functions/elementary/trigonometric.py": [ + "imports", + "sin._eval_rewrite_as_sqrt", + "sinc.eval" ] }, - "resolved_by": [ - { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\nindex b4878cfd45..1304f20a60 100644\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@ from django.utils.translation import gettext_lazy as _\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n" - }, - { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\nindex b4878cfd45..1304f20a60 100644\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@ from django.utils.translation import gettext_lazy as _\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. 
This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\ndiff --git a/regex_test.py b/regex_test.py\nnew file mode 100644\nindex 0000000000..c6a6128d5e\n--- /dev/null\n+++ b/regex_test.py\n@@ -0,0 +1,23 @@\n+import re\n+\n+# Regex patterns from validators\n+ascii_pattern = re.compile(r'\\A[\\w.@+-]+\\Z')\n+unicode_pattern = re.compile(r'\\A[\\w.@+-]+\\Z')\n+\n+# Username with a trailing newline\n+username_with_newline = \"testuser\\n\"\n+\n+# Function to test the regex patterns\n+def test_regex_patterns(username):\n+ if ascii_pattern.match(username):\n+ print(\"ASCIIUsernameValidator accepted the username.\")\n+ else:\n+ print(\"ASCIIUsernameValidator rejected the username.\")\n+\n+ if unicode_pattern.match(username):\n+ print(\"UnicodeUsernameValidator accepted the username.\")\n+ else:\n+ print(\"UnicodeUsernameValidator rejected the username.\")\n+\n+# Run the test\n+test_regex_patterns(username_with_newline)\ndiff --git a/simple_test_validators.py b/simple_test_validators.py\nnew file mode 100644\nindex 0000000000..507643c0a6\n--- /dev/null\n+++ b/simple_test_validators.py\n@@ -0,0 +1,19 @@\n+from django.contrib.auth.validators import ASCIIUsernameValidator, UnicodeUsernameValidator\n+from django.core.exceptions import ValidationError\n+\n+validators = [ASCIIUsernameValidator(), UnicodeUsernameValidator()]\n+\n+# Username with a trailing newline\n+username_with_newline = \"testuser\\\\n\"\n+\n+# Function to test the validators\n+def test_validators(username):\n+ for validator in validators:\n+ try:\n+ validator(username)\n+ print(f\"{validator.__class__.__name__} accepted the username.\")\n+ except ValidationError as e:\n+ print(f\"{validator.__class__.__name__} rejected the username: {e}\")\n+\n+# Run the test\n+test_validators(username_with_newline)\ndiff --git a/test_validators.py b/test_validators.py\nnew file mode 100644\nindex 0000000000..4edb270156\n--- /dev/null\n+++ b/test_validators.py\n@@ -0,0 +1,28 @@\n+import os\n+import django\n+from django.conf import settings\n+from django.core.exceptions import ValidationError\n+\n+# Configure Django settings\n+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django__django.settings')\n+django.setup()\n+\n+from django.contrib.auth.validators import ASCIIUsernameValidator, UnicodeUsernameValidator\n+\n+validators = [ASCIIUsernameValidator(), UnicodeUsernameValidator()]\n+\n+# Username with a trailing newline\n+username_with_newline = \"testuser\\\\n\"\n+\n+# Function to test the validators\n+def test_validators(username):\n+ for validator in validators:\n+ try:\n+ validator(username)\n+ print(f\"{validator.__class__.__name__} accepted the username.\")\n+ except ValidationError as e:\n+ print(f\"{validator.__class__.__name__} rejected the username: {e}\")\n+\n+# Run the test\n+test_validators(username_with_newline)\n+\n" - }, - { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -4,22 +4,22 @@\n from django.utils.deconstruct import deconstructible\n from django.utils.translation import gettext_lazy as _\n \n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. 
This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n )\n flags = re.ASCII\n \n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n )\n flags = 0\n" - }, - { - "name": "20240523_aider", - "patch": "diff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\nindex b4878cfd45..1304f20a60 100644\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@ from django.utils.translation import gettext_lazy as _\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n" - }, - { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\nindex b4878cfd45..1304f20a60 100644\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@ from django.utils.translation import gettext_lazy as _\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n\n" - }, - { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\nindex b4878cfd45..1304f20a60 100644\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@ from django.utils.translation import gettext_lazy as _\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. 
This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n" - }, - { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\nindex b4878cfd45..1304f20a60 100644\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@ from django.utils.translation import gettext_lazy as _\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n" - }, - { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\nindex b4878cfd45..1304f20a60 100644\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@ from django.utils.translation import gettext_lazy as _\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n" - }, - { - "name": "20240612_IBM_Research_Agent101", - "patch": "diff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\nindex b4878cfd45..1304f20a60 100644\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@ from django.utils.translation import gettext_lazy as _\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'\\A[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. 
This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\ndiff --git a/regex_test.py b/regex_test.py\nnew file mode 100644\nindex 0000000000..c6a6128d5e\n--- /dev/null\n+++ b/regex_test.py\n@@ -0,0 +1,23 @@\n+import re\n+\n+# Regex patterns from validators\n+ascii_pattern = re.compile(r'\\A[\\w.@+-]+\\Z')\n+unicode_pattern = re.compile(r'\\A[\\w.@+-]+\\Z')\n+\n+# Username with a trailing newline\n+username_with_newline = \"testuser\\n\"\n+\n+# Function to test the regex patterns\n+def test_regex_patterns(username):\n+ if ascii_pattern.match(username):\n+ print(\"ASCIIUsernameValidator accepted the username.\")\n+ else:\n+ print(\"ASCIIUsernameValidator rejected the username.\")\n+\n+ if unicode_pattern.match(username):\n+ print(\"UnicodeUsernameValidator accepted the username.\")\n+ else:\n+ print(\"UnicodeUsernameValidator rejected the username.\")\n+\n+# Run the test\n+test_regex_patterns(username_with_newline)\ndiff --git a/simple_test_validators.py b/simple_test_validators.py\nnew file mode 100644\nindex 0000000000..507643c0a6\n--- /dev/null\n+++ b/simple_test_validators.py\n@@ -0,0 +1,19 @@\n+from django.contrib.auth.validators import ASCIIUsernameValidator, UnicodeUsernameValidator\n+from django.core.exceptions import ValidationError\n+\n+validators = [ASCIIUsernameValidator(), UnicodeUsernameValidator()]\n+\n+# Username with a trailing newline\n+username_with_newline = \"testuser\\\\n\"\n+\n+# Function to test the validators\n+def test_validators(username):\n+ for validator in validators:\n+ try:\n+ validator(username)\n+ print(f\"{validator.__class__.__name__} accepted the username.\")\n+ except ValidationError as e:\n+ print(f\"{validator.__class__.__name__} rejected the username: {e}\")\n+\n+# Run the test\n+test_validators(username_with_newline)\ndiff --git a/test_validators.py b/test_validators.py\nnew file mode 100644\nindex 0000000000..4edb270156\n--- /dev/null\n+++ b/test_validators.py\n@@ -0,0 +1,28 @@\n+import os\n+import django\n+from django.conf import settings\n+from django.core.exceptions import ValidationError\n+\n+# Configure Django settings\n+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django__django.settings')\n+django.setup()\n+\n+from django.contrib.auth.validators import ASCIIUsernameValidator, UnicodeUsernameValidator\n+\n+validators = [ASCIIUsernameValidator(), UnicodeUsernameValidator()]\n+\n+# Username with a trailing newline\n+username_with_newline = \"testuser\\\\n\"\n+\n+# Function to test the validators\n+def test_validators(username):\n+ for validator in validators:\n+ try:\n+ validator(username)\n+ print(f\"{validator.__class__.__name__} accepted the username.\")\n+ except ValidationError as e:\n+ print(f\"{validator.__class__.__name__} rejected the username: {e}\")\n+\n+# Run the test\n+test_validators(username_with_newline)\n+\n" - } - ], + "test_file_spans": { + "sympy/functions/elementary/tests/test_trigonometric.py": [ + "imports", + "test_sinc", + "test_issue_9157" + ] + }, + "resolved_by": [], "alternative_spans": [] }, { - "instance_id": "scikit-learn__scikit-learn-13496", - "repo": "scikit-learn/scikit-learn", - "base_commit": "3aefc834dce72e850bff48689bea3c7dff5f3fad", - "problem_statement": "Expose warm_start in Isolation forest\nIt seems to me that `sklearn.ensemble.IsolationForest` supports incremental addition of new trees with the `warm_start` parameter of its parent class, `sklearn.ensemble.BaseBagging`.\r\n\r\nEven though this parameter is not exposed in `__init__()` , it gets inherited from `BaseBagging` and 
one can use it by changing it to `True` after initialization. To make it work, you have to also increment `n_estimators` on every iteration. \r\n\r\nIt took me a while to notice that it actually works, and I had to inspect the source code of both `IsolationForest` and `BaseBagging`. Also, it looks to me that the behavior is in-line with `sklearn.ensemble.BaseForest` that is behind e.g. `sklearn.ensemble.RandomForestClassifier`.\r\n\r\nTo make it more easier to use, I'd suggest to:\r\n* expose `warm_start` in `IsolationForest.__init__()`, default `False`;\r\n* document it in the same way as it is documented for `RandomForestClassifier`, i.e. say:\r\n```py\r\n warm_start : bool, optional (default=False)\r\n When set to ``True``, reuse the solution of the previous call to fit\r\n and add more estimators to the ensemble, otherwise, just fit a whole\r\n new forest. See :term:`the Glossary `.\r\n```\r\n* add a test to make sure it works properly;\r\n* possibly also mention in the \"IsolationForest example\" documentation entry;\r\n\n", - "golden_patch": "diff --git a/sklearn/ensemble/iforest.py b/sklearn/ensemble/iforest.py\n--- a/sklearn/ensemble/iforest.py\n+++ b/sklearn/ensemble/iforest.py\n@@ -120,6 +120,12 @@ class IsolationForest(BaseBagging, OutlierMixin):\n verbose : int, optional (default=0)\n Controls the verbosity of the tree building process.\n \n+ warm_start : bool, optional (default=False)\n+ When set to ``True``, reuse the solution of the previous call to fit\n+ and add more estimators to the ensemble, otherwise, just fit a whole\n+ new forest. See :term:`the Glossary `.\n+\n+ .. versionadded:: 0.21\n \n Attributes\n ----------\n@@ -173,7 +179,8 @@ def __init__(self,\n n_jobs=None,\n behaviour='old',\n random_state=None,\n- verbose=0):\n+ verbose=0,\n+ warm_start=False):\n super().__init__(\n base_estimator=ExtraTreeRegressor(\n max_features=1,\n@@ -185,6 +192,7 @@ def __init__(self,\n n_estimators=n_estimators,\n max_samples=max_samples,\n max_features=max_features,\n+ warm_start=warm_start,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose)\n", + "instance_id": "sympy__sympy-11897", + "repo": "sympy/sympy", + "base_commit": "e2918c1205c47345eb73c9be68b14c0f15fdeb17", + "problem_statement": "LaTeX printer inconsistent with pretty printer\nThe LaTeX printer should always give the same output as the pretty printer, unless better output is possible from LaTeX. In some cases it is inconsistent. For instance:\n\n``` py\nIn [9]: var('x', positive=True)\nOut[9]: x\n\nIn [10]: latex(exp(-x)*log(x))\nOut[10]: '\\\\frac{1}{e^{x}} \\\\log{\\\\left (x \\\\right )}'\n\nIn [11]: pprint(exp(-x)*log(x))\n -x\n\u212f \u22c5log(x)\n```\n\n(I also don't think the assumptions should affect printing). 
\n\n``` py\nIn [14]: var('x y')\nOut[14]: (x, y)\n\nIn [15]: latex(1/(x + y)/2)\nOut[15]: '\\\\frac{1}{2 x + 2 y}'\n\nIn [16]: pprint(1/(x + y)/2)\n 1\n\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n2\u22c5(x + y)\n```\n\n", + "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -235,10 +235,12 @@ def _needs_mul_brackets(self, expr, first=False, last=False):\n elif expr.is_Mul:\n if not first and _coeff_isneg(expr):\n return True\n+ if expr.is_Piecewise:\n+ return True\n if any([expr.has(x) for x in (Mod,)]):\n return True\n if (not last and\n- any([expr.has(x) for x in (Integral, Piecewise, Product, Sum)])):\n+ any([expr.has(x) for x in (Integral, Product, Sum)])):\n return True\n \n return False\n", + "test_patch": "diff --git a/sympy/printing/tests/test_latex.py b/sympy/printing/tests/test_latex.py\n--- a/sympy/printing/tests/test_latex.py\n+++ b/sympy/printing/tests/test_latex.py\n@@ -867,7 +867,7 @@ def test_latex_Piecewise():\n p = Piecewise((A**2, Eq(A, B)), (A*B, True))\n s = r\"\\begin{cases} A^{2} & \\text{for}\\: A = B \\\\A B & \\text{otherwise} \\end{cases}\"\n assert latex(p) == s\n- assert latex(A*p) == r\"A %s\" % s\n+ assert latex(A*p) == r\"A \\left(%s\\right)\" % s\n assert latex(p*A) == r\"\\left(%s\\right) A\" % s\n \n \n", + "fail_to_pass": "[\"test_latex_Piecewise\"]", + "pass_to_pass": "[\"test_printmethod\", \"test_latex_basic\", \"test_latex_builtins\", \"test_latex_SingularityFunction\", \"test_latex_cycle\", \"test_latex_permutation\", \"test_latex_Float\", \"test_latex_symbols\", \"test_hyper_printing\", \"test_latex_bessel\", \"test_latex_fresnel\", \"test_latex_brackets\", \"test_latex_subs\", \"test_latex_integrals\", \"test_latex_sets\", \"test_latex_Range\", \"test_latex_sequences\", \"test_latex_intervals\", \"test_latex_AccumuBounds\", \"test_latex_emptyset\", \"test_latex_commutator\", \"test_latex_union\", \"test_latex_symmetric_difference\", \"test_latex_Complement\", \"test_latex_Complexes\", \"test_latex_productset\", \"test_latex_Naturals\", \"test_latex_Naturals0\", \"test_latex_Integers\", \"test_latex_ImageSet\", \"test_latex_ConditionSet\", \"test_latex_ComplexRegion\", \"test_latex_Contains\", \"test_latex_sum\", \"test_latex_product\", \"test_latex_limits\", \"test_issue_3568\", \"test_latex\", \"test_latex_dict\", \"test_latex_list\", \"test_latex_rational\", \"test_latex_inverse\", \"test_latex_DiracDelta\", \"test_latex_Heaviside\", \"test_latex_KroneckerDelta\", \"test_latex_LeviCivita\", \"test_mode\", \"test_latex_Matrix\", \"test_latex_mul_symbol\", \"test_latex_issue_4381\", \"test_latex_issue_4576\", \"test_latex_pow_fraction\", \"test_noncommutative\", \"test_latex_order\", \"test_latex_Lambda\", \"test_latex_PolyElement\", \"test_latex_FracElement\", \"test_latex_Poly\", \"test_latex_ComplexRootOf\", \"test_latex_RootSum\", \"test_settings\", \"test_latex_numbers\", \"test_lamda\", \"test_custom_symbol_names\", \"test_matAdd\", \"test_matMul\", \"test_latex_MatrixSlice\", \"test_latex_RandomDomain\", \"test_PrettyPoly\", \"test_integral_transforms\", \"test_PolynomialRingBase\", \"test_categories\", \"test_Modules\", \"test_QuotientRing\", \"test_Tr\", \"test_Adjoint\", \"test_Hadamard\", \"test_ZeroMatrix\", \"test_boolean_args_order\", \"test_imaginary\", \"test_builtins_without_args\", \"test_latex_greek_functions\", \"test_translate\", \"test_other_symbols\", \"test_modifiers\", \"test_greek_symbols\", \"test_builtin_no_args\", 
\"test_issue_6853\", \"test_Mul\", \"test_Pow\", \"test_issue_7180\", \"test_issue_8409\", \"test_issue_7117\", \"test_issue_2934\", \"test_issue_10489\"]", "expected_spans": { - "sklearn/ensemble/iforest.py": [ - "IsolationForest" + "sympy/printing/latex.py": [ + "LatexPrinter._needs_mul_brackets" ] }, - "resolved_by": [ - { - "name": "20240523_aider", - "patch": "diff --git a/sklearn/ensemble/iforest.py b/sklearn/ensemble/iforest.py\nindex 8a1bd3625..25b890beb 100644\n--- a/sklearn/ensemble/iforest.py\n+++ b/sklearn/ensemble/iforest.py\n@@ -111,6 +111,16 @@ class IsolationForest(BaseBagging, OutlierMixin):\n ``behaviour`` parameter will be deprecated in 0.22 and removed in\n 0.24.\n \n+ warm_start : bool, optional (default=False)\n+ When set to ``True``, reuse the solution of the previous call to fit\n+ and add more estimators to the ensemble, otherwise, just fit a whole\n+ new forest. See :term:`the Glossary `.\n+\n+ warm_start : bool, optional (default=False)\n+ When set to ``True``, reuse the solution of the previous call to fit\n+ and add more estimators to the ensemble, otherwise, just fit a whole\n+ new forest. See :term:`the Glossary `.\n+\n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n If RandomState instance, random_state is the random number generator;\n@@ -172,6 +182,7 @@ class IsolationForest(BaseBagging, OutlierMixin):\n bootstrap=False,\n n_jobs=None,\n behaviour='old',\n+ warm_start=False,\n random_state=None,\n verbose=0):\n super().__init__(\n@@ -179,6 +190,7 @@ class IsolationForest(BaseBagging, OutlierMixin):\n max_features=1,\n splitter='random',\n random_state=random_state),\n+ warm_start=warm_start,\n # here above max_features has no links with self.max_features\n bootstrap=bootstrap,\n bootstrap_features=False,\n" - }, - { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sklearn/ensemble/iforest.py\n+++ b/sklearn/ensemble/iforest.py\n@@ -110,6 +110,11 @@\n .. deprecated:: 0.22\n ``behaviour`` parameter will be deprecated in 0.22 and removed in\n 0.24.\n+\n+ warm_start : bool, optional (default=False)\n+ When set to ``True``, reuse the solution of the previous call to fit\n+ and add more estimators to the ensemble, otherwise, just fit a whole\n+ new forest. 
See :term:`the Glossary `.\n \n random_state : int, RandomState instance or None, optional (default=None)\n If int, random_state is the seed used by the random number generator;\n@@ -172,6 +177,7 @@\n bootstrap=False,\n n_jobs=None,\n behaviour='old',\n+ warm_start=False,\n random_state=None,\n verbose=0):\n super().__init__(\n@@ -186,6 +192,7 @@\n max_samples=max_samples,\n max_features=max_features,\n n_jobs=n_jobs,\n+ warm_start=warm_start,\n random_state=random_state,\n verbose=verbose)\n \n" - }, - { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sklearn/ensemble/iforest.py b/sklearn/ensemble/iforest.py\nindex 8a1bd3625..f4a7d93d3 100644\n--- a/sklearn/ensemble/iforest.py\n+++ b/sklearn/ensemble/iforest.py\n@@ -28,6 +28,9 @@ INTEGER_TYPES = (numbers.Integral, np.integer)\n \n class IsolationForest(BaseBagging, OutlierMixin):\n \"\"\"Isolation Forest Algorithm\n+ \n+ Attributes\n+ ----------\n \n Return the anomaly score of each sample using the IsolationForest algorithm\n \n@@ -118,6 +121,11 @@ class IsolationForest(BaseBagging, OutlierMixin):\n by `np.random`.\n \n verbose : int, optional (default=0)\n+ \n+ warm_start : bool, optional (default=False)\n+ When set to ``True``, reuse the solution of the previous call to fit\n+ and add more estimators to the ensemble, otherwise, just fit a whole\n+ new forest. See :term:`the Glossary `.\n Controls the verbosity of the tree building process.\n \n \n@@ -173,7 +181,8 @@ class IsolationForest(BaseBagging, OutlierMixin):\n n_jobs=None,\n behaviour='old',\n random_state=None,\n- verbose=0):\n+ verbose=0,\n+ warm_start=False):\n super().__init__(\n base_estimator=ExtraTreeRegressor(\n max_features=1,\n@@ -187,10 +196,12 @@ class IsolationForest(BaseBagging, OutlierMixin):\n max_features=max_features,\n n_jobs=n_jobs,\n random_state=random_state,\n- verbose=verbose)\n+ verbose=verbose,\n+ warm_start=warm_start)\n \n self.behaviour = behaviour\n self.contamination = contamination\n+ self.warm_start = warm_start\n \n def _set_oob_score(self, X, y):\n raise NotImplementedError(\"OOB score not supported by iforest\")\n@@ -275,7 +286,7 @@ class IsolationForest(BaseBagging, OutlierMixin):\n \n self.max_samples_ = max_samples\n max_depth = int(np.ceil(np.log2(max(max_samples, 2))))\n- super()._fit(X, y, max_samples,\n+ super()._fit(X, y, self.max_samples_,\n max_depth=max_depth,\n sample_weight=sample_weight)\n \n" - }, - { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/sklearn/ensemble/iforest.py b/sklearn/ensemble/iforest.py\nindex 8a1bd3625..8a6ef5188 100644\n--- a/sklearn/ensemble/iforest.py\n+++ b/sklearn/ensemble/iforest.py\n@@ -173,7 +173,8 @@ class IsolationForest(BaseBagging, OutlierMixin):\n n_jobs=None,\n behaviour='old',\n random_state=None,\n- verbose=0):\n+ verbose=0,\n+ warm_start=False): # Added warm_start parameter\n super().__init__(\n base_estimator=ExtraTreeRegressor(\n max_features=1,\n@@ -187,10 +188,18 @@ class IsolationForest(BaseBagging, OutlierMixin):\n max_features=max_features,\n n_jobs=n_jobs,\n random_state=random_state,\n- verbose=verbose)\n+ verbose=verbose,\n+ warm_start=warm_start) # Pass warm_start to super\n \n self.behaviour = behaviour\n self.contamination = contamination\n+ # Documentation for warm_start parameter\n+ \"\"\"\n+ warm_start : bool, optional (default=False)\n+ When set to ``True``, reuse the solution of the previous call to fit\n+ and add more estimators to the ensemble, otherwise, just fit a whole\n+ new forest. 
See :term:`the Glossary `.\n+ \"\"\"\n \n def _set_oob_score(self, X, y):\n raise NotImplementedError(\"OOB score not supported by iforest\")\n" - }, - { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sklearn/ensemble/iforest.py b/sklearn/ensemble/iforest.py\nindex 8a1bd3625..c665370ab 100644\n--- a/sklearn/ensemble/iforest.py\n+++ b/sklearn/ensemble/iforest.py\n@@ -120,6 +120,10 @@ class IsolationForest(BaseBagging, OutlierMixin):\n verbose : int, optional (default=0)\n Controls the verbosity of the tree building process.\n \n+ warm_start : bool, optional (default=False)\n+ When set to ``True``, reuse the solution of the previous call to fit\n+ and add more estimators to the ensemble, otherwise, just fit a whole\n+ new forest. See :term:`the Glossary `.\n \n Attributes\n ----------\n@@ -173,7 +177,8 @@ class IsolationForest(BaseBagging, OutlierMixin):\n n_jobs=None,\n behaviour='old',\n random_state=None,\n- verbose=0):\n+ verbose=0,\n+ warm_start=False):\n super().__init__(\n base_estimator=ExtraTreeRegressor(\n max_features=1,\n@@ -187,7 +192,8 @@ class IsolationForest(BaseBagging, OutlierMixin):\n max_features=max_features,\n n_jobs=n_jobs,\n random_state=random_state,\n- verbose=verbose)\n+ verbose=verbose,\n+ warm_start=warm_start)\n \n self.behaviour = behaviour\n self.contamination = contamination\n" - }, - { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sklearn/ensemble/iforest.py b/sklearn/ensemble/iforest.py\nindex 8a1bd3625..08eaa2cc5 100644\n--- a/sklearn/ensemble/iforest.py\n+++ b/sklearn/ensemble/iforest.py\n@@ -70,6 +70,11 @@ class IsolationForest(BaseBagging, OutlierMixin):\n on the decision function. If 'auto', the decision function threshold is\n determined as in the original paper.\n \n+ warm_start : bool, optional (default=False)\n+ When set to ``True``, reuse the solution of the previous call to fit\n+ and add more estimators to the ensemble, otherwise, just fit a whole\n+ new forest. See :term:`the Glossary `.\n+\n .. 
versionchanged:: 0.20\n The default value of ``contamination`` will change from 0.1 in 0.20\n to ``'auto'`` in 0.22.\n@@ -173,7 +178,8 @@ class IsolationForest(BaseBagging, OutlierMixin):\n n_jobs=None,\n behaviour='old',\n random_state=None,\n- verbose=0):\n+ verbose=0,\n+ warm_start=False): # Add warm_start parameter with default value False\n super().__init__(\n base_estimator=ExtraTreeRegressor(\n max_features=1,\n@@ -187,7 +193,8 @@ class IsolationForest(BaseBagging, OutlierMixin):\n max_features=max_features,\n n_jobs=n_jobs,\n random_state=random_state,\n- verbose=verbose)\n+ verbose=verbose,\n+ warm_start=warm_start) # Pass warm_start to the parent class\n \n self.behaviour = behaviour\n self.contamination = contamination\ndiff --git a/sklearn/ensemble/tests/test_iforest.py b/sklearn/ensemble/tests/test_iforest.py\nindex 67ba2d7f9..24c9d9a2d 100644\n--- a/sklearn/ensemble/tests/test_iforest.py\n+++ b/sklearn/ensemble/tests/test_iforest.py\n@@ -358,4 +358,21 @@ def test_iforest_chunks_works2(\n mocked_get_chunk, contamination, n_predict_calls\n ):\n test_iforest_works(contamination)\n- assert mocked_get_chunk.call_count == n_predict_calls\n+def test_iforest_warm_start():\n+ \"\"\"Test IsolationForest warm_start parameter.\"\"\"\n+ X = iris.data\n+ rng = check_random_state(42)\n+ iforest1 = IsolationForest(n_estimators=10, warm_start=True, random_state=rng)\n+ iforest1.fit(X)\n+ n_estimators_increment = 5\n+ iforest1.set_params(n_estimators=iforest1.n_estimators + n_estimators_increment)\n+ iforest1.fit(X)\n+\n+ iforest2 = IsolationForest(n_estimators=iforest1.n_estimators, random_state=rng)\n+ iforest2.fit(X)\n+\n+ assert_equal(len(iforest1.estimators_), len(iforest2.estimators_),\n+ \"IsolationForest warm_start adds more trees.\")\n+ assert_allclose(iforest1.decision_function(X), iforest2.decision_function(X),\n+ rtol=1e-5, atol=0,\n+ err_msg=\"IsolationForest warm_start should produce the same results\")\n" - } - ], + "test_file_spans": { + "sympy/printing/tests/test_latex.py": [ + "test_latex_Piecewise" + ] + }, + "resolved_by": [], "alternative_spans": [] }, { - "instance_id": "scikit-learn__scikit-learn-13497", - "repo": "scikit-learn/scikit-learn", - "base_commit": "26f690961a52946dd2f53bf0fdd4264b2ae5be90", - "problem_statement": "Comparing string to array in _estimate_mi\nIn ``_estimate_mi`` there is ``discrete_features == 'auto'`` but discrete features can be an array of indices or a boolean mask.\r\nThis will error in future versions of numpy.\r\nAlso this means we never test this function with discrete features != 'auto', it seems?\n", - "golden_patch": "diff --git a/sklearn/feature_selection/mutual_info_.py b/sklearn/feature_selection/mutual_info_.py\n--- a/sklearn/feature_selection/mutual_info_.py\n+++ b/sklearn/feature_selection/mutual_info_.py\n@@ -10,7 +10,7 @@\n from ..preprocessing import scale\n from ..utils import check_random_state\n from ..utils.fixes import _astype_copy_false\n-from ..utils.validation import check_X_y\n+from ..utils.validation import check_array, check_X_y\n from ..utils.multiclass import check_classification_targets\n \n \n@@ -247,14 +247,16 @@ def _estimate_mi(X, y, discrete_features='auto', discrete_target=False,\n X, y = check_X_y(X, y, accept_sparse='csc', y_numeric=not discrete_target)\n n_samples, n_features = X.shape\n \n- if discrete_features == 'auto':\n- discrete_features = issparse(X)\n-\n- if isinstance(discrete_features, bool):\n+ if isinstance(discrete_features, (str, bool)):\n+ if isinstance(discrete_features, str):\n+ if 
discrete_features == 'auto':\n+ discrete_features = issparse(X)\n+ else:\n+ raise ValueError(\"Invalid string value for discrete_features.\")\n discrete_mask = np.empty(n_features, dtype=bool)\n discrete_mask.fill(discrete_features)\n else:\n- discrete_features = np.asarray(discrete_features)\n+ discrete_features = check_array(discrete_features, ensure_2d=False)\n if discrete_features.dtype != 'bool':\n discrete_mask = np.zeros(n_features, dtype=bool)\n discrete_mask[discrete_features] = True\n", + "instance_id": "sympy__sympy-12171", + "repo": "sympy/sympy", + "base_commit": "ca6ef27272be31c9dc3753ede9232c39df9a75d8", + "problem_statement": "matematica code printer does not handle floats and derivatives correctly\nIn its current state the mathematica code printer does not handle Derivative(func(vars), deriver) \r\ne.g. Derivative(f(t), t) yields Derivative(f(t), t) instead of D[f[t],t]\r\n\r\nAlso floats with exponents are not handled correctly e.g. 1.0e-4 is not converted to 1.0*^-4\r\n\r\nThis has an easy fix by adding the following lines to MCodePrinter:\r\n\r\n\r\ndef _print_Derivative(self, expr):\r\n return \"D[%s]\" % (self.stringify(expr.args, \", \"))\r\n\r\ndef _print_Float(self, expr):\r\n res =str(expr)\r\n return res.replace('e','*^') \r\n\r\n\r\n\n", + "golden_patch": "diff --git a/sympy/printing/mathematica.py b/sympy/printing/mathematica.py\n--- a/sympy/printing/mathematica.py\n+++ b/sympy/printing/mathematica.py\n@@ -109,6 +109,9 @@ def _print_Integral(self, expr):\n def _print_Sum(self, expr):\n return \"Hold[Sum[\" + ', '.join(self.doprint(a) for a in expr.args) + \"]]\"\n \n+ def _print_Derivative(self, expr):\n+ return \"Hold[D[\" + ', '.join(self.doprint(a) for a in expr.args) + \"]]\"\n+\n \n def mathematica_code(expr, **settings):\n r\"\"\"Converts an expr to a string of the Wolfram Mathematica code\n", + "test_patch": "diff --git a/sympy/printing/tests/test_mathematica.py b/sympy/printing/tests/test_mathematica.py\n--- a/sympy/printing/tests/test_mathematica.py\n+++ b/sympy/printing/tests/test_mathematica.py\n@@ -1,5 +1,5 @@\n from sympy.core import (S, pi, oo, symbols, Function,\n- Rational, Integer, Tuple)\n+ Rational, Integer, Tuple, Derivative)\n from sympy.integrals import Integral\n from sympy.concrete import Sum\n from sympy.functions import exp, sin, cos\n@@ -74,6 +74,14 @@ def test_Integral():\n \"{y, -Infinity, Infinity}]]\"\n \n \n+def test_Derivative():\n+ assert mcode(Derivative(sin(x), x)) == \"Hold[D[Sin[x], x]]\"\n+ assert mcode(Derivative(x, x)) == \"Hold[D[x, x]]\"\n+ assert mcode(Derivative(sin(x)*y**4, x, 2)) == \"Hold[D[y^4*Sin[x], x, x]]\"\n+ assert mcode(Derivative(sin(x)*y**4, x, y, x)) == \"Hold[D[y^4*Sin[x], x, y, x]]\"\n+ assert mcode(Derivative(sin(x)*y**4, x, y, 3, x)) == \"Hold[D[y^4*Sin[x], x, y, y, y, x]]\"\n+\n+\n def test_Sum():\n assert mcode(Sum(sin(x), (x, 0, 10))) == \"Hold[Sum[Sin[x], {x, 0, 10}]]\"\n assert mcode(Sum(exp(-x**2 - y**2),\n", + "fail_to_pass": "[\"test_Derivative\"]", + "pass_to_pass": "[\"test_Integer\", \"test_Rational\", \"test_Function\", \"test_Pow\", \"test_Mul\", \"test_constants\", \"test_containers\", \"test_Integral\"]", "expected_spans": { - "sklearn/feature_selection/mutual_info_.py": [ + "sympy/printing/mathematica.py": [ + "MCodePrinter" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_mathematica.py": [ "imports", - "_estimate_mi" + "test_Sum" ] }, "resolved_by": [], "alternative_spans": [] }, { - "instance_id": "django__django-11133", - "repo": "django/django", - "base_commit": 
"879cc3da6249e920b8d54518a0ae06de835d7373", - "problem_statement": "HttpResponse doesn't handle memoryview objects\nDescription\n\t\nI am trying to write a BinaryField retrieved from the database into a HttpResponse. When the database is Sqlite this works correctly, but Postgresql returns the contents of the field as a memoryview object and it seems like current Django doesn't like this combination:\nfrom django.http import HttpResponse\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n# String content\nresponse = HttpResponse(\"My Content\")\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\nresponse.content\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n# Out: b'My Content'\n# This is correct\n# Bytes content\nresponse = HttpResponse(b\"My Content\")\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \nresponse.content\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n# Out: b'My Content'\n# This is also correct\n# memoryview content\nresponse = HttpResponse(memoryview(b\"My Content\"))\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \nresponse.content\n# Out: b''\n# This is not correct, I am expecting b'My Content'\n", - "golden_patch": "diff --git a/django/http/response.py b/django/http/response.py\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -229,7 +229,7 @@ def make_bytes(self, value):\n # Handle string types -- we can't rely on force_bytes here because:\n # - Python attempts str conversion first\n # - when self._charset != 'utf-8' it re-encodes the content\n- if isinstance(value, bytes):\n+ if isinstance(value, (bytes, memoryview)):\n return bytes(value)\n if isinstance(value, str):\n return bytes(value.encode(self.charset))\n", + "instance_id": "sympy__sympy-12236", + "repo": "sympy/sympy", + "base_commit": "d60497958f6dea7f5e25bc41e9107a6a63694d01", + "problem_statement": "Wrong result with apart\n```\r\nPython 3.6.0 |Continuum Analytics, Inc.| (default, Dec 23 2016, 12:22:00) \r\nType \"copyright\", \"credits\" or \"license\" for more information.\r\n\r\nIPython 5.1.0 -- An enhanced Interactive Python.\r\n? -> Introduction and overview of IPython's features.\r\n%quickref -> Quick reference.\r\nhelp -> Python's own help system.\r\nobject? -> Details about 'object', use 'object??' for extra details.\r\n\r\nIn [1]: from sympy import symbols\r\n\r\nIn [2]: a = symbols('a', real=True)\r\n\r\nIn [3]: t = symbols('t', real=True, negative=False)\r\n\r\nIn [4]: bug = a * (-t + (-t + 1) * (2 * t - 1)) / (2 * t - 1)\r\n\r\nIn [5]: bug.subs(a, 1)\r\nOut[5]: (-t + (-t + 1)*(2*t - 1))/(2*t - 1)\r\n\r\nIn [6]: bug.subs(a, 1).apart()\r\nOut[6]: -t + 1/2 - 1/(2*(2*t - 1))\r\n\r\nIn [7]: bug.subs(a, 1).apart(t)\r\nOut[7]: -t + 1/2 - 1/(2*(2*t - 1))\r\n\r\nIn [8]: bug.apart(t)\r\nOut[8]: -a*t\r\n\r\nIn [9]: import sympy; sympy.__version__\r\nOut[9]: '1.0'\r\n```\nWrong result with apart\n```\r\nPython 3.6.0 |Continuum Analytics, Inc.| (default, Dec 23 2016, 12:22:00) \r\nType \"copyright\", \"credits\" or \"license\" for more information.\r\n\r\nIPython 5.1.0 -- An enhanced Interactive Python.\r\n? -> Introduction and overview of IPython's features.\r\n%quickref -> Quick reference.\r\nhelp -> Python's own help system.\r\nobject? -> Details about 'object', use 'object??' 
for extra details.\r\n\r\nIn [1]: from sympy import symbols\r\n\r\nIn [2]: a = symbols('a', real=True)\r\n\r\nIn [3]: t = symbols('t', real=True, negative=False)\r\n\r\nIn [4]: bug = a * (-t + (-t + 1) * (2 * t - 1)) / (2 * t - 1)\r\n\r\nIn [5]: bug.subs(a, 1)\r\nOut[5]: (-t + (-t + 1)*(2*t - 1))/(2*t - 1)\r\n\r\nIn [6]: bug.subs(a, 1).apart()\r\nOut[6]: -t + 1/2 - 1/(2*(2*t - 1))\r\n\r\nIn [7]: bug.subs(a, 1).apart(t)\r\nOut[7]: -t + 1/2 - 1/(2*(2*t - 1))\r\n\r\nIn [8]: bug.apart(t)\r\nOut[8]: -a*t\r\n\r\nIn [9]: import sympy; sympy.__version__\r\nOut[9]: '1.0'\r\n```\n", + "golden_patch": "diff --git a/sympy/polys/domains/polynomialring.py b/sympy/polys/domains/polynomialring.py\n--- a/sympy/polys/domains/polynomialring.py\n+++ b/sympy/polys/domains/polynomialring.py\n@@ -104,10 +104,10 @@ def from_PolynomialRing(K1, a, K0):\n \n def from_FractionField(K1, a, K0):\n \"\"\"Convert a rational function to ``dtype``. \"\"\"\n- denom = K0.denom(a)\n+ q, r = K0.numer(a).div(K0.denom(a))\n \n- if denom.is_ground:\n- return K1.from_PolynomialRing(K0.numer(a)/denom, K0.field.ring.to_domain())\n+ if r.is_zero:\n+ return K1.from_PolynomialRing(q, K0.field.ring.to_domain())\n else:\n return None\n \n", + "test_patch": "diff --git a/sympy/polys/tests/test_partfrac.py b/sympy/polys/tests/test_partfrac.py\n--- a/sympy/polys/tests/test_partfrac.py\n+++ b/sympy/polys/tests/test_partfrac.py\n@@ -8,7 +8,7 @@\n )\n \n from sympy import (S, Poly, E, pi, I, Matrix, Eq, RootSum, Lambda,\n- Symbol, Dummy, factor, together, sqrt, Expr)\n+ Symbol, Dummy, factor, together, sqrt, Expr, Rational)\n from sympy.utilities.pytest import raises, XFAIL\n from sympy.abc import x, y, a, b, c\n \n@@ -37,6 +37,18 @@ def test_apart():\n \n assert apart(Eq((x**2 + 1)/(x + 1), x), x) == Eq(x - 1 + 2/(x + 1), x)\n \n+ assert apart(x/2, y) == x/2\n+\n+ f, g = (x+y)/(2*x - y), Rational(3/2)*y/((2*x - y)) + Rational(1/2)\n+\n+ assert apart(f, x, full=False) == g\n+ assert apart(f, x, full=True) == g\n+\n+ f, g = (x+y)/(2*x - y), 3*x/(2*x - y) - 1\n+\n+ assert apart(f, y, full=False) == g\n+ assert apart(f, y, full=True) == g\n+\n raises(NotImplementedError, lambda: apart(1/(x + 1)/(y + 2)))\n \n \ndiff --git a/sympy/polys/tests/test_polytools.py b/sympy/polys/tests/test_polytools.py\n--- a/sympy/polys/tests/test_polytools.py\n+++ b/sympy/polys/tests/test_polytools.py\n@@ -1700,6 +1700,10 @@ def test_div():\n q = f.exquo(g)\n assert q.get_domain().is_ZZ\n \n+ f, g = Poly(x+y, x), Poly(2*x+y, x)\n+ q, r = f.div(g)\n+ assert q.get_domain().is_Frac and r.get_domain().is_Frac\n+\n \n def test_gcdex():\n f, g = 2*x, x**2 - 16\n", + "fail_to_pass": "[\"test_div\"]", + "pass_to_pass": "[\"test_apart_matrix\", \"test_apart_symbolic\", \"test_apart_full\", \"test_apart_undetermined_coeffs\", \"test_apart_list\", \"test_assemble_partfrac_list\", \"test_noncommutative\", \"test_Poly_from_dict\", \"test_Poly_from_list\", \"test_Poly_from_poly\", \"test_Poly_from_expr\", \"test_Poly__new__\", \"test_Poly__args\", \"test_Poly__gens\", \"test_Poly_zero\", \"test_Poly_one\", \"test_Poly__unify\", \"test_Poly_free_symbols\", \"test_PurePoly_free_symbols\", \"test_Poly__eq__\", \"test_PurePoly__eq__\", \"test_PurePoly_Poly\", \"test_Poly_get_domain\", \"test_Poly_set_domain\", \"test_Poly_get_modulus\", \"test_Poly_set_modulus\", \"test_Poly_add_ground\", \"test_Poly_sub_ground\", \"test_Poly_mul_ground\", \"test_Poly_quo_ground\", \"test_Poly_exquo_ground\", \"test_Poly_abs\", \"test_Poly_neg\", \"test_Poly_add\", \"test_Poly_sub\", 
\"test_Poly_mul\", \"test_Poly_sqr\", \"test_Poly_pow\", \"test_Poly_divmod\", \"test_Poly_eq_ne\", \"test_Poly_nonzero\", \"test_Poly_properties\", \"test_Poly_is_irreducible\", \"test_Poly_subs\", \"test_Poly_replace\", \"test_Poly_reorder\", \"test_Poly_ltrim\", \"test_Poly_has_only_gens\", \"test_Poly_to_ring\", \"test_Poly_to_field\", \"test_Poly_to_exact\", \"test_Poly_retract\", \"test_Poly_slice\", \"test_Poly_coeffs\", \"test_Poly_monoms\", \"test_Poly_terms\", \"test_Poly_all_coeffs\", \"test_Poly_all_monoms\", \"test_Poly_all_terms\", \"test_Poly_termwise\", \"test_Poly_length\", \"test_Poly_as_dict\", \"test_Poly_as_expr\", \"test_Poly_lift\", \"test_Poly_deflate\", \"test_Poly_inject\", \"test_Poly_eject\", \"test_Poly_exclude\", \"test_Poly__gen_to_level\", \"test_Poly_degree\", \"test_Poly_degree_list\", \"test_Poly_total_degree\", \"test_Poly_homogenize\", \"test_Poly_homogeneous_order\", \"test_Poly_LC\", \"test_Poly_TC\", \"test_Poly_EC\", \"test_Poly_coeff\", \"test_Poly_nth\", \"test_Poly_LM\", \"test_Poly_LM_custom_order\", \"test_Poly_EM\", \"test_Poly_LT\", \"test_Poly_ET\", \"test_Poly_max_norm\", \"test_Poly_l1_norm\", \"test_Poly_clear_denoms\", \"test_Poly_rat_clear_denoms\", \"test_Poly_integrate\", \"test_Poly_diff\", \"test_issue_9585\", \"test_Poly_eval\", \"test_Poly___call__\", \"test_parallel_poly_from_expr\", \"test_pdiv\", \"test_gcdex\", \"test_revert\", \"test_subresultants\", \"test_resultant\", \"test_discriminant\", \"test_dispersion\", \"test_gcd_list\", \"test_lcm_list\", \"test_gcd\", \"test_gcd_numbers_vs_polys\", \"test_terms_gcd\", \"test_trunc\", \"test_monic\", \"test_content\", \"test_primitive\", \"test_compose\", \"test_shift\", \"test_transform\", \"test_gff\", \"test_sqf_norm\", \"test_sqf\", \"test_factor_large\", \"test_refine_root\", \"test_count_roots\", \"test_Poly_root\", \"test_real_roots\", \"test_all_roots\", \"test_ground_roots\", \"test_nth_power_roots_poly\", \"test_reduced\", \"test_groebner\", \"test_fglm\", \"test_is_zero_dimensional\", \"test_GroebnerBasis\", \"test_poly\", \"test_keep_coeff\", \"test_to_rational_coeffs\", \"test_factor_terms\"]", "expected_spans": { - "django/http/response.py": [ - "HttpResponseBase.make_bytes" + "sympy/polys/domains/polynomialring.py": [ + "PolynomialRing.from_FractionField" + ] + }, + "test_file_spans": { + "sympy/polys/tests/test_partfrac.py": [ + "imports", + "test_apart" + ], + "sympy/polys/tests/test_polytools.py": [ + "test_div" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-12419", + "repo": "sympy/sympy", + "base_commit": "479939f8c65c8c2908bbedc959549a257a7c0b0b", + "problem_statement": "Sum of the elements of an identity matrix is zero\nI think this is a bug.\r\n\r\nI created a matrix by M.T * M under an assumption that M is orthogonal. SymPy successfully recognized that the result is an identity matrix. 
I tested its identity-ness by element-wise, queries, and sum of the diagonal elements and received expected results.\r\n\r\nHowever, when I attempt to evaluate the total sum of the elements the result was 0 while 'n' is expected.\r\n\r\n```\r\nfrom sympy import *\r\nfrom sympy import Q as Query\r\n\r\nn = Symbol('n', integer=True, positive=True)\r\ni, j = symbols('i j', integer=True)\r\nM = MatrixSymbol('M', n, n)\r\n\r\ne = None\r\nwith assuming(Query.orthogonal(M)):\r\n e = refine((M.T * M).doit())\r\n\r\n# Correct: M.T * M is an identity matrix.\r\nprint(e, e[0, 0], e[0, 1], e[1, 0], e[1, 1])\r\n\r\n# Correct: The output is True True\r\nprint(ask(Query.diagonal(e)), ask(Query.integer_elements(e)))\r\n\r\n# Correct: The sum of the diagonal elements is n\r\nprint(Sum(e[i, i], (i, 0, n-1)).doit())\r\n\r\n# So far so good\r\n# Total sum of the elements is expected to be 'n' but the answer is 0!\r\nprint(Sum(Sum(e[i, j], (i, 0, n-1)), (j, 0, n-1)).doit())\r\n```\n", + "golden_patch": "diff --git a/sympy/matrices/expressions/matexpr.py b/sympy/matrices/expressions/matexpr.py\n--- a/sympy/matrices/expressions/matexpr.py\n+++ b/sympy/matrices/expressions/matexpr.py\n@@ -2,11 +2,12 @@\n \n from functools import wraps\n \n-from sympy.core import S, Symbol, Tuple, Integer, Basic, Expr\n+from sympy.core import S, Symbol, Tuple, Integer, Basic, Expr, Eq\n from sympy.core.decorators import call_highest_priority\n from sympy.core.compatibility import range\n from sympy.core.sympify import SympifyError, sympify\n from sympy.functions import conjugate, adjoint\n+from sympy.functions.special.tensor_functions import KroneckerDelta\n from sympy.matrices import ShapeError\n from sympy.simplify import simplify\n \n@@ -375,7 +376,6 @@ def _eval_derivative(self, v):\n if self.args[0] != v.args[0]:\n return S.Zero\n \n- from sympy import KroneckerDelta\n return KroneckerDelta(self.args[1], v.args[1])*KroneckerDelta(self.args[2], v.args[2])\n \n \n@@ -476,10 +476,12 @@ def conjugate(self):\n return self\n \n def _entry(self, i, j):\n- if i == j:\n+ eq = Eq(i, j)\n+ if eq is S.true:\n return S.One\n- else:\n+ elif eq is S.false:\n return S.Zero\n+ return KroneckerDelta(i, j)\n \n def _eval_determinant(self):\n return S.One\n", + "test_patch": "diff --git a/sympy/matrices/expressions/tests/test_matexpr.py b/sympy/matrices/expressions/tests/test_matexpr.py\n--- a/sympy/matrices/expressions/tests/test_matexpr.py\n+++ b/sympy/matrices/expressions/tests/test_matexpr.py\n@@ -65,6 +65,7 @@ def test_ZeroMatrix():\n with raises(ShapeError):\n Z**2\n \n+\n def test_ZeroMatrix_doit():\n Znn = ZeroMatrix(Add(n, n, evaluate=False), n)\n assert isinstance(Znn.rows, Add)\n@@ -74,6 +75,8 @@ def test_ZeroMatrix_doit():\n \n def test_Identity():\n A = MatrixSymbol('A', n, m)\n+ i, j = symbols('i j')\n+\n In = Identity(n)\n Im = Identity(m)\n \n@@ -84,6 +87,11 @@ def test_Identity():\n assert In.inverse() == In\n assert In.conjugate() == In\n \n+ assert In[i, j] != 0\n+ assert Sum(In[i, j], (i, 0, n-1), (j, 0, n-1)).subs(n,3).doit() == 3\n+ assert Sum(Sum(In[i, j], (i, 0, n-1)), (j, 0, n-1)).subs(n,3).doit() == 3\n+\n+\n def test_Identity_doit():\n Inn = Identity(Add(n, n, evaluate=False))\n assert isinstance(Inn.rows, Add)\n", + "fail_to_pass": "[\"test_Identity\"]", + "pass_to_pass": "[\"test_shape\", \"test_matexpr\", \"test_subs\", \"test_ZeroMatrix\", \"test_ZeroMatrix_doit\", \"test_Identity_doit\", \"test_addition\", \"test_multiplication\", \"test_MatPow\", \"test_MatrixSymbol\", \"test_dense_conversion\", 
\"test_free_symbols\", \"test_zero_matmul\", \"test_matadd_simplify\", \"test_matmul_simplify\", \"test_invariants\", \"test_indexing\", \"test_single_indexing\", \"test_MatrixElement_commutative\", \"test_MatrixSymbol_determinant\", \"test_MatrixElement_diff\", \"test_MatrixElement_doit\", \"test_identity_powers\", \"test_Zero_power\", \"test_matrixelement_diff\"]", + "expected_spans": { + "sympy/matrices/expressions/matexpr.py": [ + "imports", + "MatrixElement._eval_derivative", + "Identity._entry" + ] + }, + "test_file_spans": { + "sympy/matrices/expressions/tests/test_matexpr.py": [ + "test_ZeroMatrix_doit", + "test_Identity", + "test_Identity_doit" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-12454", + "repo": "sympy/sympy", + "base_commit": "d3fcdb72bfcbb560eb45264ac1c03f359436edef", + "problem_statement": "is_upper() raises IndexError for tall matrices\nThe function Matrix.is_upper raises an IndexError for a 4x2 matrix of zeros.\r\n```\r\n>>> sympy.zeros(4,2).is_upper\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"sympy/matrices/matrices.py\", line 1112, in is_upper\r\n for i in range(1, self.rows)\r\n File \"sympy/matrices/matrices.py\", line 1113, in \r\n for j in range(i))\r\n File \"sympy/matrices/dense.py\", line 119, in __getitem__\r\n return self.extract(i, j)\r\n File \"sympy/matrices/matrices.py\", line 352, in extract\r\n colsList = [a2idx(k, self.cols) for k in colsList]\r\n File \"sympy/matrices/matrices.py\", line 5261, in a2idx\r\n raise IndexError(\"Index out of range: a[%s]\" % (j,))\r\nIndexError: Index out of range: a[2]\r\n```\r\nThe code for is_upper() is\r\n```\r\n return all(self[i, j].is_zero\r\n for i in range(1, self.rows)\r\n for j in range(i))\r\n```\r\nFor a 4x2 matrix, is_upper iterates over the indices:\r\n```\r\n>>> A = sympy.zeros(4, 2)\r\n>>> print tuple([i, j] for i in range(1, A.rows) for j in range(i))\r\n([1, 0], [2, 0], [2, 1], [3, 0], [3, 1], [3, 2])\r\n```\r\nThe attempt to index the (3,2) entry appears to be the source of the error. 
\n", + "golden_patch": "diff --git a/sympy/matrices/matrices.py b/sympy/matrices/matrices.py\n--- a/sympy/matrices/matrices.py\n+++ b/sympy/matrices/matrices.py\n@@ -641,7 +641,7 @@ def _eval_is_zero(self):\n def _eval_is_upper_hessenberg(self):\n return all(self[i, j].is_zero\n for i in range(2, self.rows)\n- for j in range(i - 1))\n+ for j in range(min(self.cols, (i - 1))))\n \n def _eval_values(self):\n return [i for i in self if not i.is_zero]\n@@ -1112,7 +1112,7 @@ def is_upper(self):\n \"\"\"\n return all(self[i, j].is_zero\n for i in range(1, self.rows)\n- for j in range(i))\n+ for j in range(min(i, self.cols)))\n \n @property\n def is_zero(self):\n", + "test_patch": "diff --git a/sympy/matrices/tests/test_matrices.py b/sympy/matrices/tests/test_matrices.py\n--- a/sympy/matrices/tests/test_matrices.py\n+++ b/sympy/matrices/tests/test_matrices.py\n@@ -1225,6 +1225,8 @@ def test_is_upper():\n assert a.is_upper is True\n a = Matrix([[1], [2], [3]])\n assert a.is_upper is False\n+ a = zeros(4, 2)\n+ assert a.is_upper is True\n \n \n def test_is_lower():\n@@ -1880,6 +1882,9 @@ def test_hessenberg():\n A = Matrix([[3, 4, 1], [2, 4, 5], [3, 1, 2]])\n assert not A.is_upper_hessenberg\n \n+ A = zeros(5, 2)\n+ assert A.is_upper_hessenberg\n+\n \n def test_cholesky():\n raises(NonSquareMatrixError, lambda: Matrix((1, 2)).cholesky())\n", + "fail_to_pass": "[\"test_is_upper\", \"test_hessenberg\"]", + "pass_to_pass": "[\"test_args\", \"test_division\", \"test_sum\", \"test_addition\", \"test_fancy_index_matrix\", \"test_multiplication\", \"test_power\", \"test_creation\", \"test_tolist\", \"test_as_mutable\", \"test_determinant\", \"test_det_LU_decomposition\", \"test_berkowitz_minors\", \"test_slicing\", \"test_submatrix_assignment\", \"test_extract\", \"test_reshape\", \"test_applyfunc\", \"test_expand\", \"test_random\", \"test_LUdecomp\", \"test_LUsolve\", \"test_QRsolve\", \"test_inverse\", \"test_matrix_inverse_mod\", \"test_util\", \"test_jacobian_hessian\", \"test_QR\", \"test_QR_non_square\", \"test_nullspace\", \"test_columnspace\", \"test_wronskian\", \"test_subs\", \"test_xreplace\", \"test_transpose\", \"test_conjugate\", \"test_conj_dirac\", \"test_trace\", \"test_shape\", \"test_col_row_op\", \"test_zip_row_op\", \"test_issue_3950\", \"test_issue_3981\", \"test_evalf\", \"test_is_symbolic\", \"test_is_lower\", \"test_is_nilpotent\", \"test_zeros_ones_fill\", \"test_empty_zeros\", \"test_inv_iszerofunc\", \"test_jacobian_metrics\", \"test_jacobian2\", \"test_issue_4564\", \"test_nonvectorJacobian\", \"test_vec\", \"test_vech\", \"test_vech_errors\", \"test_diag\", \"test_get_diag_blocks1\", \"test_get_diag_blocks2\", \"test_inv_block\", \"test_creation_args\", \"test_diagonal_symmetrical\", \"test_diagonalization\", \"test_jordan_form\", \"test_jordan_form_complex_issue_9274\", \"test_issue_10220\", \"test_Matrix_berkowitz_charpoly\", \"test_exp\", \"test_has\", \"test_errors\", \"test_len\", \"test_integrate\", \"test_diff\", \"test_getattr\", \"test_cholesky\", \"test_LDLdecomposition\", \"test_cholesky_solve\", \"test_LDLsolve\", \"test_lower_triangular_solve\", \"test_upper_triangular_solve\", \"test_diagonal_solve\", \"test_singular_values\", \"test_condition_number\", \"test_equality\", \"test_col_join\", \"test_row_insert\", \"test_col_insert\", \"test_normalized\", \"test_print_nonzero\", \"test_zeros_eye\", \"test_is_zero\", \"test_rotation_matrices\", \"test_DeferredVector\", \"test_DeferredVector_not_iterable\", \"test_DeferredVector_Matrix\", \"test_GramSchmidt\", 
\"test_casoratian\", \"test_zero_dimension_multiply\", \"test_slice_issue_2884\", \"test_slice_issue_3401\", \"test_copyin\", \"test_invertible_check\", \"test_issue_5964\", \"test_issue_7604\", \"test_is_Identity\", \"test_dot\", \"test_dual\", \"test_anti_symmetric\", \"test_issue_5321\", \"test_issue_5320\", \"test_issue_11944\", \"test_cross\", \"test_hash\", \"test_adjoint\", \"test_simplify_immutable\", \"test_rank\", \"test_issue_11434\", \"test_rank_regression_from_so\", \"test_replace\", \"test_replace_map\", \"test_atoms\", \"test_pinv_solve\", \"test_gauss_jordan_solve\", \"test_issue_7201\", \"test_free_symbols\", \"test_hermitian\", \"test_doit\", \"test_issue_9457_9467_9876\", \"test_issue_9422\", \"test_issue_10770\", \"test_issue_10658\", \"test_partial_pivoting\", \"test_iszero_substitution\"]", + "expected_spans": { + "sympy/matrices/matrices.py": [ + "MatrixProperties._eval_is_upper_hessenberg", + "MatrixProperties.is_upper" + ] + }, + "test_file_spans": { + "sympy/matrices/tests/test_matrices.py": [ + "test_is_upper", + "test_hessenberg" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-12481", + "repo": "sympy/sympy", + "base_commit": "c807dfe7569692cad24f02a08477b70c1679a4dd", + "problem_statement": "`Permutation` constructor fails with non-disjoint cycles\nCalling `Permutation([[0,1],[0,1]])` raises a `ValueError` instead of constructing the identity permutation. If the cycles passed in are non-disjoint, they should be applied in left-to-right order and the resulting permutation should be returned.\r\n\r\nThis should be easy to compute. I don't see a reason why non-disjoint cycles should be forbidden.\n", + "golden_patch": "diff --git a/sympy/combinatorics/permutations.py b/sympy/combinatorics/permutations.py\n--- a/sympy/combinatorics/permutations.py\n+++ b/sympy/combinatorics/permutations.py\n@@ -895,12 +895,8 @@ def __new__(cls, *args, **kwargs):\n # counting starts from 1.\n \n temp = flatten(args)\n- if has_dups(temp):\n- if is_cycle:\n- raise ValueError('there were repeated elements; to resolve '\n- 'cycles use Cycle%s.' 
% ''.join([str(tuple(c)) for c in args]))\n- else:\n- raise ValueError('there were repeated elements.')\n+ if has_dups(temp) and not is_cycle:\n+ raise ValueError('there were repeated elements.')\n temp = set(temp)\n \n if not is_cycle and \\\n", + "test_patch": "diff --git a/sympy/combinatorics/tests/test_permutations.py b/sympy/combinatorics/tests/test_permutations.py\n--- a/sympy/combinatorics/tests/test_permutations.py\n+++ b/sympy/combinatorics/tests/test_permutations.py\n@@ -339,6 +339,7 @@ def test_args():\n assert Permutation([[1], [4, 2]], size=1) == Permutation([0, 1, 4, 3, 2])\n assert Permutation(\n [[1], [4, 2]], size=6) == Permutation([0, 1, 4, 3, 2, 5])\n+ assert Permutation([[0, 1], [0, 2]]) == Permutation(0, 1, 2)\n assert Permutation([], size=3) == Permutation([0, 1, 2])\n assert Permutation(3).list(5) == [0, 1, 2, 3, 4]\n assert Permutation(3).list(-1) == []\n@@ -349,7 +350,6 @@ def test_args():\n raises(ValueError, lambda: Permutation([[1, 2], 0]))\n # enclosing brackets needed on 0\n raises(ValueError, lambda: Permutation([1, 1, 0]))\n- raises(ValueError, lambda: Permutation([[1], [1, 2]]))\n raises(ValueError, lambda: Permutation([4, 5], size=10)) # where are 0-3?\n # but this is ok because cycles imply that only those listed moved\n assert Permutation(4, 5) == Permutation([0, 1, 2, 3, 5, 4])\n", + "fail_to_pass": "[\"test_args\"]", + "pass_to_pass": "[\"test_Permutation\", \"test_josephus\", \"test_ranking\", \"test_mul\", \"test_Cycle\", \"test_from_sequence\", \"test_printing_cyclic\"]", + "expected_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "test_file_spans": { + "sympy/combinatorics/tests/test_permutations.py": [ + "test_args" ] }, "resolved_by": [ { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/django/http/response.py b/django/http/response.py\nindex 6a84e193ba..3dd6b1caed 100644\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -233,6 +233,9 @@ class HttpResponseBase:\n return bytes(value)\n if isinstance(value, str):\n return bytes(value.encode(self.charset))\n+ # Handle memoryview objects.\n+ if isinstance(value, memoryview):\n+ return bytes(value)\n # Handle non-string types.\n return str(value).encode(self.charset)\n \n" + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + } }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/django/http/response.py b/django/http/response.py\nindex 6a84e193ba..d73ab3af78 100644\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -309,13 +309,15 @@ class HttpResponse(HttpResponseBase):\n @content.setter\n def content(self, value):\n # Consume iterators upon assignment to allow repeated iteration.\n- if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):\n+ if hasattr(value, '__iter__') and not isinstance(value, (bytes, str, memoryview)):\n content = b''.join(self.make_bytes(chunk) for chunk in value)\n if hasattr(value, 'close'):\n try:\n value.close()\n except Exception:\n pass\n+ elif isinstance(value, memoryview):\n+ content = value.tobytes()\n else:\n content = self.make_bytes(value)\n # Create a list of properly encoded bytestrings to support write().\n" + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + 
"sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/django/http/response.py b/django/http/response.py\nindex 6a84e193ba..136b0ad822 100644\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -233,6 +233,12 @@ class HttpResponseBase:\n return bytes(value)\n if isinstance(value, str):\n return bytes(value.encode(self.charset))\n+ # Handle memoryview types.\n+ if isinstance(value, memoryview):\n+ return value.tobytes()\n+ # Handle memoryview types.\n+ if isinstance(value, memoryview):\n+ return value.tobytes()\n # Handle non-string types.\n return str(value).encode(self.charset)\n \n" + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "imports", + "_af_rmuln", + "_af_pow", + "_af_commutes_with", + "Cycle.__missing__", + "Cycle.__init__", + "Permutation", + "Permutation.__new__", + "Permutation._hashable_content", + "Permutation.__repr__", + "Permutation.list", + "Permutation.rmul", + "Permutation.__rmul__", + "Permutation.__mul__", + "Permutation.__rxor__", + "Permutation.__xor__", + "Permutation.__call__", + "Permutation.unrank_nonlex", + "Permutation.rank_nonlex", + "Permutation.rank", + "Permutation.parity", + "Permutation.commutator", + "Permutation.order", + "Permutation.length", + "Permutation.cycle_structure", + "Permutation.index", + "Permutation.inversion_vector", + "Permutation.rank_trotterjohnson", + "Permutation.unrank_trotterjohnson", + "Permutation.next_trotterjohnson", + "Permutation.get_precedence_distance", + "Permutation.get_adjacency_distance", + "Permutation.get_positional_distance", + "Permutation.from_inversion_vector", + "Permutation.unrank_lex", + "_merge", + "impl" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "imports", + "_af_rmuln", + "_af_pow", + "_af_commutes_with", + "Cycle.__missing__", + "Cycle.__init__", + "Permutation", + "Permutation.__new__", + "Permutation._hashable_content", + "Permutation.__repr__", + "Permutation.list", + "Permutation.rmul", + "Permutation.__rmul__", + "Permutation.__mul__", + "Permutation.__rxor__", + "Permutation.__xor__", + "Permutation.__call__", + "Permutation.unrank_nonlex", + "Permutation.rank_nonlex", + "Permutation.rank", + "Permutation.parity", + "Permutation.commutator", + "Permutation.order", + "Permutation.length", + "Permutation.cycle_structure", + "Permutation.index", + "Permutation.inversion_vector", + "Permutation.rank_trotterjohnson", + "Permutation.unrank_trotterjohnson", + "Permutation.next_trotterjohnson", + "Permutation.get_precedence_distance", + "Permutation.get_adjacency_distance", + "Permutation.get_positional_distance", + "Permutation.from_inversion_vector", + "Permutation.unrank_lex", + "_merge", + "impl" + ] + } }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -309,6 +309,8 @@\n @content.setter\n def content(self, value):\n # Consume iterators upon assignment to allow repeated iteration.\n+ if isinstance(value, memoryview):\n+ value = value.tobytes()\n if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):\n content = b''.join(self.make_bytes(chunk) for chunk in value)\n if hasattr(value, 'close'):\n" + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + } }, { - "name": 
"20240527_marscode-agent-dev", - "patch": "diff --git a/django/http/response.py b/django/http/response.py\nindex 6a84e193ba..6da99ec157 100644\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -180,7 +180,8 @@ class HttpResponseBase:\n max_age = max(0, delta.days * 86400 + delta.seconds)\n else:\n self.cookies[key]['expires'] = expires\n- else:\n+ if isinstance(value, memoryview):\n+ content = value.tobytes()\n self.cookies[key]['expires'] = ''\n if max_age is not None:\n self.cookies[key]['max-age'] = max_age\n@@ -309,7 +310,9 @@ class HttpResponse(HttpResponseBase):\n @content.setter\n def content(self, value):\n # Consume iterators upon assignment to allow repeated iteration.\n- if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):\n+ if isinstance(value, memoryview):\n+ content = value.tobytes()\n+ elif hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):\n content = b''.join(self.make_bytes(chunk) for chunk in value)\n if hasattr(value, 'close'):\n try:\n" + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + } }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/django/http/response.py b/django/http/response.py\nindex 6a84e193ba..99be83310d 100644\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -317,7 +317,11 @@ class HttpResponse(HttpResponseBase):\n except Exception:\n pass\n else:\n- content = self.make_bytes(value)\n+ # Check if value is a memoryview instance and convert it to bytes\n+ if isinstance(value, memoryview):\n+ content = bytes(value)\n+ else:\n+ content = self.make_bytes(value)\n # Create a list of properly encoded bytestrings to support write().\n self._container = [content]\n \n" + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Cycle.__call__", + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Cycle.__call__", + "Permutation.__new__" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/http/response.py b/django/http/response.py\nindex 6a84e193ba..05da20567f 100644\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -233,6 +233,9 @@ class HttpResponseBase:\n return bytes(value)\n if isinstance(value, str):\n return bytes(value.encode(self.charset))\n+ # Handle memoryview type.\n+ if isinstance(value, memoryview):\n+ return value.tobytes()\n # Handle non-string types.\n return str(value).encode(self.charset)\n \n" + "name": "20240829_Isoform", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/django/http/response.py b/django/http/response.py\nindex 6a84e193ba..471577d2dd 100644\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -309,6 +309,8 @@ class HttpResponse(HttpResponseBase):\n @content.setter\n def content(self, value):\n # Consume iterators upon assignment to allow repeated iteration.\n+ if isinstance(value, memoryview):\n+ value = value.tobytes()\n if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):\n content = b''.join(self.make_bytes(chunk) for chunk in value)\n if hasattr(value, 'close'):\n" - }, + "name": 
"20240604_CodeR", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + } + }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "diff --git a/django/http/response.py b/django/http/response.py\nindex 6a84e193ba..d73ab3af78 100644\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -309,13 +309,15 @@ class HttpResponse(HttpResponseBase):\n @content.setter\n def content(self, value):\n # Consume iterators upon assignment to allow repeated iteration.\n- if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):\n+ if hasattr(value, '__iter__') and not isinstance(value, (bytes, str, memoryview)):\n content = b''.join(self.make_bytes(chunk) for chunk in value)\n if hasattr(value, 'close'):\n try:\n value.close()\n except Exception:\n pass\n+ elif isinstance(value, memoryview):\n+ content = value.tobytes()\n else:\n content = self.make_bytes(value)\n # Create a list of properly encoded bytestrings to support write().\n" - } - ], - "alternative_spans": [ + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + } + }, { - "run_name": "20240402_sweagent_gpt4", - "spans": { - "django/http/response.py": [ - "HttpResponse.content_3" + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" ] } }, { - "run_name": "20240524_opencsg_starship_gpt4", - "spans": { - "django/http/response.py": [ - "HttpResponse.content_3" + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" ] } }, { - "run_name": "20240527_marscode-agent-dev", - "spans": { - "django/http/response.py": [ - "HttpResponseBase.set_cookie", - "HttpResponse.content_3" + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" ] } }, { - "run_name": "20240530_autocoderover-v20240408", - "spans": { - "django/http/response.py": [ - "HttpResponse.content_3" + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" ] } }, { - "run_name": "20240609_moatless_gpt4o", - "spans": { - "django/http/response.py": [ - "HttpResponse.content_3" + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" ] } }, { - "run_name": "20240612_IBM_Research_Agent101", - "spans": { - "django/http/response.py": [ - "HttpResponse.content_3" + "name": "20240811_gru", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.__new__" + ] + }, + "alternative_spans": { + "sympy/combinatorics/permutations.py": [ + 
"Permutation.__new__" ] } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sympy/combinatorics/tests/test_permutations.py": [ + "test_args" + ] + }, + "alternative_spans": {} } - ] + ], + "alternative_spans": [] }, { - "instance_id": "sympy__sympy-16503", + "instance_id": "sympy__sympy-13031", "repo": "sympy/sympy", - "base_commit": "a7e6f093c98a3c4783848a19fce646e32b6e0161", - "problem_statement": "Bad centering for Sum pretty print\n```\r\n>>> pprint(Sum(x, (x, 1, oo)) + 3)\r\n \u221e\r\n ___\r\n \u2572\r\n \u2572 x\r\n \u2571 + 3\r\n \u2571\r\n \u203e\u203e\u203e\r\nx = 1\r\n```\r\n\r\nThe `x` and the `+ 3` should be aligned. I'm not sure if the `x` should be lower of if the `+ 3` should be higher. \n", - "golden_patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -564,7 +564,7 @@ def adjust(s, wid=None, how='<^>'):\n for i in reversed(range(1, d)):\n lines.append('%s/%s' % (' '*i, ' '*(w - i)))\n lines.append(\"/\" + \"_\"*(w - 1) + ',')\n- return d, h + more, lines, 0\n+ return d, h + more, lines, more\n else:\n w = w + more\n d = d + more\n@@ -619,7 +619,7 @@ def adjust(s, wid=None, how='<^>'):\n if first:\n # change F baseline so it centers on the sign\n prettyF.baseline -= d - (prettyF.height()//2 -\n- prettyF.baseline) - adjustment\n+ prettyF.baseline)\n first = False\n \n # put padding to the right\n@@ -629,7 +629,11 @@ def adjust(s, wid=None, how='<^>'):\n # put the present prettyF to the right\n prettyF = prettyForm(*prettySign.right(prettyF))\n \n- prettyF.baseline = max_upper + sign_height//2\n+ # adjust baseline of ascii mode sigma with an odd height so that it is\n+ # exactly through the center\n+ ascii_adjustment = ascii_mode if not adjustment else 0\n+ prettyF.baseline = max_upper + sign_height//2 + ascii_adjustment\n+\n prettyF.binding = prettyForm.MUL\n return prettyF\n \n", + "base_commit": "2dfa7457f20ee187fbb09b5b6a1631da4458388c", + "problem_statement": "Behavior of Matrix hstack and vstack changed in sympy 1.1\nIn sympy 1.0:\r\n```\r\nimport sympy as sy\r\nM1 = sy.Matrix.zeros(0, 0)\r\nM2 = sy.Matrix.zeros(0, 1)\r\nM3 = sy.Matrix.zeros(0, 2)\r\nM4 = sy.Matrix.zeros(0, 3)\r\nsy.Matrix.hstack(M1, M2, M3, M4).shape\r\n```\r\nreturns \r\n`(0, 6)`\r\n\r\nNow, same in sympy 1.1:\r\n```\r\nimport sympy as sy\r\nM1 = sy.Matrix.zeros(0, 0)\r\nM2 = sy.Matrix.zeros(0, 1)\r\nM3 = sy.Matrix.zeros(0, 2)\r\nM4 = sy.Matrix.zeros(0, 3)\r\nsy.Matrix.hstack(M1, M2, M3, M4).shape\r\n```\r\nreturns\r\n`(0, 3)\r\n`\r\nwhereas:\r\n```\r\nimport sympy as sy\r\nM1 = sy.Matrix.zeros(1, 0)\r\nM2 = sy.Matrix.zeros(1, 1)\r\nM3 = sy.Matrix.zeros(1, 2)\r\nM4 = sy.Matrix.zeros(1, 3)\r\nsy.Matrix.hstack(M1, M2, M3, M4).shape\r\n```\r\nreturns\r\n`(1, 6)\r\n`\n", + "golden_patch": "diff --git a/sympy/matrices/sparse.py b/sympy/matrices/sparse.py\n--- a/sympy/matrices/sparse.py\n+++ b/sympy/matrices/sparse.py\n@@ -985,8 +985,10 @@ def col_join(self, other):\n >>> C == A.row_insert(A.rows, Matrix(B))\n True\n \"\"\"\n- if not self:\n- return type(self)(other)\n+ # A null matrix can always be stacked (see #10770)\n+ if self.rows == 0 and self.cols != other.cols:\n+ return self._new(0, other.cols, []).col_join(other)\n+\n A, B = self, other\n if not A.cols == B.cols:\n raise ShapeError()\n@@ -1191,8 +1193,10 @@ def row_join(self, other):\n >>> C == A.col_insert(A.cols, B)\n True\n \"\"\"\n- if not self:\n- return type(self)(other)\n+ # A null 
matrix can always be stacked (see #10770)\n+ if self.cols == 0 and self.rows != other.rows:\n+ return self._new(other.rows, 0, []).row_join(other)\n+\n A, B = self, other\n if not A.rows == B.rows:\n raise ShapeError()\n", + "test_patch": "diff --git a/sympy/matrices/tests/test_sparse.py b/sympy/matrices/tests/test_sparse.py\n--- a/sympy/matrices/tests/test_sparse.py\n+++ b/sympy/matrices/tests/test_sparse.py\n@@ -26,6 +26,12 @@ def sparse_zeros(n):\n assert type(a.row_join(b)) == type(a)\n assert type(a.col_join(b)) == type(a)\n \n+ # make sure 0 x n matrices get stacked correctly\n+ sparse_matrices = [SparseMatrix.zeros(0, n) for n in range(4)]\n+ assert SparseMatrix.hstack(*sparse_matrices) == Matrix(0, 6, [])\n+ sparse_matrices = [SparseMatrix.zeros(n, 0) for n in range(4)]\n+ assert SparseMatrix.vstack(*sparse_matrices) == Matrix(6, 0, [])\n+\n # test element assignment\n a = SparseMatrix((\n (1, 0),\n", + "fail_to_pass": "[\"test_sparse_matrix\"]", + "pass_to_pass": "[\"test_transpose\", \"test_trace\", \"test_CL_RL\", \"test_add\", \"test_errors\", \"test_len\", \"test_sparse_zeros_sparse_eye\", \"test_copyin\", \"test_sparse_solve\"]", "expected_spans": { - "sympy/printing/pretty/pretty.py": [ - "PrettyPrinter._print_Sum" + "sympy/matrices/sparse.py": [ + "MutableSparseMatrix.col_join", + "MutableSparseMatrix.row_join" ] }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11179", - "repo": "django/django", - "base_commit": "19fc6376ce67d01ca37a91ef2f55ef769f50513a", - "problem_statement": "delete() on instances of models without any dependencies doesn't clear PKs.\nDescription\n\t\nDeleting any model with no dependencies not updates the PK on the model. It should be set to None after .delete() call.\nSee Django.db.models.deletion:276-281. 
Should update the model line 280.\n", - "golden_patch": "diff --git a/django/db/models/deletion.py b/django/db/models/deletion.py\n--- a/django/db/models/deletion.py\n+++ b/django/db/models/deletion.py\n@@ -277,6 +277,7 @@ def delete(self):\n if self.can_fast_delete(instance):\n with transaction.mark_for_rollback_on_error():\n count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)\n+ setattr(instance, model._meta.pk.attname, None)\n return count, {model._meta.label: count}\n \n with transaction.atomic(using=self.using, savepoint=False):\n", - "expected_spans": { - "django/db/models/deletion.py": [ - "Collector.delete" + "test_file_spans": { + "sympy/matrices/tests/test_sparse.py": [ + "test_sparse_matrix" ] }, "resolved_by": [ { "name": "20240523_aider", - "patch": "diff --git a/django/db/models/deletion.py b/django/db/models/deletion.py\nindex 92fe4affb9..3a462f1c4c 100644\n--- a/django/db/models/deletion.py\n+++ b/django/db/models/deletion.py\n@@ -277,6 +277,7 @@ class Collector:\n if self.can_fast_delete(instance):\n with transaction.mark_for_rollback_on_error():\n count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)\n+ setattr(instance, model._meta.pk.attname, None)\n return count, {model._meta.label: count}\n \n with transaction.atomic(using=self.using, savepoint=False):\n" + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping.hstack", + "MatrixShaping", + "MatrixShaping.vstack" + ] + }, + "alternative_spans": {} }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/django/db/models/deletion.py\n+++ b/django/db/models/deletion.py\n@@ -277,6 +277,7 @@\n if self.can_fast_delete(instance):\n with transaction.mark_for_rollback_on_error():\n count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)\n+ instance.pk = None\n return count, {model._meta.label: count}\n \n with transaction.atomic(using=self.using, savepoint=False):\n" + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/django/db/models/base.py b/django/db/models/base.py\nindex f2560117d2..a5e2706962 100644\n--- a/django/db/models/base.py\n+++ b/django/db/models/base.py\n@@ -916,7 +916,9 @@ class Model(metaclass=ModelBase):\n \n collector = Collector(using=using)\n collector.collect([self], keep_parents=keep_parents)\n- return collector.delete()\n+ result = collector.delete()\n+ self.pk = None\n+ return result\n \n delete.alters_data = True\n \n" + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping.hstack", + "MatrixShaping.row_join", + "MatrixShaping", + "MatrixShaping.vstack" + ] + }, + "alternative_spans": {} }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/django/db/models/base.py b/django/db/models/base.py\nindex f2560117d2..0bf0769eeb 100644\n--- a/django/db/models/base.py\n+++ b/django/db/models/base.py\n@@ -916,7 +916,10 @@ class Model(metaclass=ModelBase):\n \n collector = Collector(using=using)\n collector.collect([self], keep_parents=keep_parents)\n- return collector.delete()\n+ result = collector.delete()\n+ # Set the primary key to None after successful deletion\n+ self.pk = None\n+ return result\n \n delete.alters_data = True\n \n" + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/matrices/common.py": [ + "docstring", + "imports", + "MatrixRequired._new", + "MatrixRequired.__eq__", + "MatrixRequired.__getitem__", + "MatrixRequired.__len__", 
+ "MatrixShaping", + "MatrixShaping._eval_col_del", + "MatrixShaping._eval_col_insert", + "MatrixShaping._eval_col_join", + "MatrixShaping._eval_extract", + "MatrixShaping._eval_get_diag_blocks", + "MatrixShaping._eval_row_del", + "MatrixShaping._eval_row_join", + "MatrixShaping._eval_tolist", + "MatrixShaping._eval_vec", + "MatrixShaping.col_del", + "MatrixShaping.col_insert", + "MatrixShaping.col_join", + "MatrixShaping.extract", + "MatrixShaping.hstack", + "MatrixShaping.reshape", + "MatrixShaping.row_del", + "MatrixShaping.row_insert", + "MatrixShaping.row_join", + "MatrixShaping.shape", + "MatrixShaping.vstack", + "MatrixSpecial._eval_diag", + "MatrixSpecial._eval_eye", + "MatrixSpecial._eval_jordan_block", + "MatrixSpecial._eval_ones", + "MatrixSpecial._eval_zeros", + "MatrixSpecial.diag", + "MatrixSpecial.eye", + "MatrixSpecial.jordan_block", + "MatrixSpecial.ones", + "MatrixSpecial.zeros", + "MatrixProperties._eval_is_anti_symmetric", + "MatrixProperties._eval_is_diagonal", + "MatrixProperties._eval_is_matrix_hermitian", + "MatrixProperties._eval_is_Identity", + "MatrixProperties._eval_is_lower_hessenberg", + "MatrixProperties._eval_is_lower", + "MatrixProperties._eval_is_symbolic", + "MatrixProperties._eval_is_symmetric", + "MatrixProperties._eval_is_upper_hessenberg", + "MatrixProperties.atoms", + "MatrixProperties.is_anti_symmetric", + "MatrixProperties.is_hermitian", + "MatrixProperties.is_lower_hessenberg", + "MatrixProperties.is_symmetric", + "MatrixProperties.is_upper", + "MatrixOperations._eval_as_real_imag", + "MatrixOperations._eval_conjugate", + "MatrixOperations._eval_permute_cols", + "MatrixOperations._eval_permute_rows", + "MatrixOperations.applyfunc", + "MatrixOperations.expand", + "MatrixOperations.permute", + "MatrixOperations.subs", + "MatrixOperations:3", + "MatrixOperations.xreplace", + "MatrixOperations:9", + "MatrixArithmetic", + "MatrixArithmetic._eval_Abs", + "MatrixArithmetic._eval_add", + "MatrixArithmetic._eval_matrix_mul", + "MatrixArithmetic._eval_matrix_mul_elementwise", + "MatrixArithmetic._eval_matrix_rmul", + "MatrixArithmetic._eval_scalar_mul", + "MatrixArithmetic._eval_scalar_rmul", + "MatrixArithmetic.__add__", + "MatrixArithmetic.__mul__", + "MatrixArithmetic.__pow__", + "MatrixArithmetic.__rmul__", + "MatrixArithmetic.__rsub__", + "MatrixArithmetic.__sub__", + "MatrixArithmetic.multiply_elementwise", + "MatrixCommon", + "_MinimalMatrix", + "_MinimalMatrix.__init__", + "_MinimalMatrix.__getitem__", + "_MinimalMatrix.__len__", + "_MinimalMatrix.__repr__", + "_MinimalMatrix.shape", + "a2idx", + "classof" + ] + }, + "alternative_spans": {} }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/db/models/deletion.py b/django/db/models/deletion.py\nindex 92fe4affb9..5ebfab3915 100644\n--- a/django/db/models/deletion.py\n+++ b/django/db/models/deletion.py\n@@ -277,6 +277,7 @@ class Collector:\n if self.can_fast_delete(instance):\n with transaction.mark_for_rollback_on_error():\n count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)\n+ instance.pk = None\n return count, {model._meta.label: count}\n \n with transaction.atomic(using=self.using, savepoint=False):\n" + "name": "20240829_Isoform", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping.hstack", + "MatrixShaping", + "MatrixShaping.vstack" + ] + }, + "alternative_spans": {} }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/django/db/models/deletion.py b/django/db/models/deletion.py\nindex 92fe4affb9..3a462f1c4c 100644\n--- 
a/django/db/models/deletion.py\n+++ b/django/db/models/deletion.py\n@@ -277,6 +277,7 @@ class Collector:\n if self.can_fast_delete(instance):\n with transaction.mark_for_rollback_on_error():\n count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)\n+ setattr(instance, model._meta.pk.attname, None)\n return count, {model._meta.label: count}\n \n with transaction.atomic(using=self.using, savepoint=False):\n" + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping.hstack", + "MatrixShaping", + "MatrixShaping.vstack" + ] + }, + "alternative_spans": {} }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/django/db/models/deletion.py b/django/db/models/deletion.py\nindex 92fe4affb9..947decc4f5 100644\n--- a/django/db/models/deletion.py\n+++ b/django/db/models/deletion.py\n@@ -277,7 +277,8 @@ class Collector:\n if self.can_fast_delete(instance):\n with transaction.mark_for_rollback_on_error():\n count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)\n- return count, {model._meta.label: count}\n+ instance.pk = None\n+ return count, {model._meta.label: count}\n \n with transaction.atomic(using=self.using, savepoint=False):\n # send pre_delete signals\ndiff --git a/jedi_info_django.db.models.deletion.txt b/jedi_info_django.db.models.deletion.txt\nnew file mode 100644\nindex 0000000000..147c0933dd\n--- /dev/null\n+++ b/jedi_info_django.db.models.deletion.txt\n@@ -0,0 +1,5 @@\n+Could not find information for django.db.models.deletion.delete\n+Trying django.db.models.deletion\n+********************************************************************************\n+Information about django.db.models.deletion\n+Defined in file: /django__django/django/db/models/deletion.py\n\\ No newline at end of file\n" + "name": "20240811_gru", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping.hstack", + "MatrixShaping", + "MatrixShaping.vstack" + ] + }, + "alternative_spans": {} } ], "alternative_spans": [] }, { - "instance_id": "scikit-learn__scikit-learn-13584", - "repo": "scikit-learn/scikit-learn", - "base_commit": "0e3c1879b06d839171b7d0a607d71bbb19a966a9", - "problem_statement": "bug in print_changed_only in new repr: vector values\n```python\r\nimport sklearn\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegressionCV\r\nsklearn.set_config(print_changed_only=True)\r\nprint(LogisticRegressionCV(Cs=np.array([0.1, 1])))\r\n```\r\n> ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()\r\n\r\nping @NicolasHug \r\n\n", - "golden_patch": "diff --git a/sklearn/utils/_pprint.py b/sklearn/utils/_pprint.py\n--- a/sklearn/utils/_pprint.py\n+++ b/sklearn/utils/_pprint.py\n@@ -95,7 +95,7 @@ def _changed_params(estimator):\n init_params = signature(init_func).parameters\n init_params = {name: param.default for name, param in init_params.items()}\n for k, v in params.items():\n- if (v != init_params[k] and\n+ if (repr(v) != repr(init_params[k]) and\n not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):\n filtered_params[k] = v\n return filtered_params\n", + "instance_id": "sympy__sympy-13043", + "repo": "sympy/sympy", + "base_commit": "a3389a25ec84d36f5cf04a4f2562d820f131db64", + "problem_statement": "decompose() function in intpoly returns a list of arbitrary order\nThe decompose() function, with separate=True, returns `list(poly_dict.values())`, which is ordered arbitrarily. \r\n\r\nWhat is this used for? 
It should be sorted somehow, or returning a set (in which case, why not just use the returned dictionary and have the caller take the values). This is causing test failures for me after some changes to the core. \r\n\r\nCC @ArifAhmed1995 @certik \n", + "golden_patch": "diff --git a/sympy/integrals/intpoly.py b/sympy/integrals/intpoly.py\n--- a/sympy/integrals/intpoly.py\n+++ b/sympy/integrals/intpoly.py\n@@ -556,7 +556,7 @@ def decompose(expr, separate=False):\n >>> decompose(x**2 + x*y + x + y + x**3*y**2 + y**5)\n {1: x + y, 2: x**2 + x*y, 5: x**3*y**2 + y**5}\n >>> decompose(x**2 + x*y + x + y + x**3*y**2 + y**5, True)\n- [x, y, x**2, y**5, x*y, x**3*y**2]\n+ {x, x**2, y, y**5, x*y, x**3*y**2}\n \"\"\"\n expr = S(expr)\n poly_dict = {}\n@@ -569,7 +569,7 @@ def decompose(expr, separate=False):\n degrees = [(sum(degree_list(monom, *symbols)), monom)\n for monom in expr.args]\n if separate:\n- return [monom[1] for monom in degrees]\n+ return {monom[1] for monom in degrees}\n else:\n for monom in degrees:\n degree, term = monom\n@@ -593,7 +593,7 @@ def decompose(expr, separate=False):\n poly_dict[0] = expr\n \n if separate:\n- return list(poly_dict.values())\n+ return set(poly_dict.values())\n return poly_dict\n \n \n", + "test_patch": "diff --git a/sympy/integrals/tests/test_intpoly.py b/sympy/integrals/tests/test_intpoly.py\n--- a/sympy/integrals/tests/test_intpoly.py\n+++ b/sympy/integrals/tests/test_intpoly.py\n@@ -26,15 +26,15 @@ def test_decompose():\n assert decompose(9*x**2 + y + 4*x + x**3 + y**2*x + 3) ==\\\n {0: 3, 1: 4*x + y, 2: 9*x**2, 3: x**3 + x*y**2}\n \n- assert decompose(x, True) == [x]\n- assert decompose(x ** 2, True) == [x ** 2]\n- assert decompose(x * y, True) == [x * y]\n- assert decompose(x + y, True) == [x, y]\n- assert decompose(x ** 2 + y, True) == [y, x ** 2]\n- assert decompose(8 * x ** 2 + 4 * y + 7, True) == [7, 4*y, 8*x**2]\n- assert decompose(x ** 2 + 3 * y * x, True) == [x ** 2, 3 * x * y]\n+ assert decompose(x, True) == {x}\n+ assert decompose(x ** 2, True) == {x**2}\n+ assert decompose(x * y, True) == {x * y}\n+ assert decompose(x + y, True) == {x, y}\n+ assert decompose(x ** 2 + y, True) == {y, x ** 2}\n+ assert decompose(8 * x ** 2 + 4 * y + 7, True) == {7, 4*y, 8*x**2}\n+ assert decompose(x ** 2 + 3 * y * x, True) == {x ** 2, 3 * x * y}\n assert decompose(9 * x ** 2 + y + 4 * x + x ** 3 + y ** 2 * x + 3, True) == \\\n- [3, y, x**3, 4*x, 9*x**2, x*y**2]\n+ {3, y, 4*x, 9*x**2, x*y**2, x**3}\n \n \n def test_best_origin():\n", + "fail_to_pass": "[\"test_decompose\"]", + "pass_to_pass": "[\"test_best_origin\"]", "expected_spans": { - "sklearn/utils/_pprint.py": [ - "_changed_params" + "sympy/integrals/intpoly.py": [ + "decompose" ] }, - "resolved_by": [ - { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/sklearn/utils/_pprint.py b/sklearn/utils/_pprint.py\nindex b8686dba5..1a0a14595 100644\n--- a/sklearn/utils/_pprint.py\n+++ b/sklearn/utils/_pprint.py\n@@ -66,6 +66,7 @@ BaseEstimator.__repr__ for pretty-printing estimators\"\"\"\n from inspect import signature\n import pprint\n from collections import OrderedDict\n+import numpy as np # Added import for numpy\n \n from ..base import BaseEstimator\n from .._config import get_config\n@@ -95,10 +96,11 @@ def _changed_params(estimator):\n init_params = signature(init_func).parameters\n init_params = {name: param.default for name, param in init_params.items()}\n for k, v in params.items():\n- if (v != init_params[k] and\n- not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):\n+ if not 
np.array_equal(v, init_params[k]) and \\\n+ not (is_scalar_nan(init_params[k]) and is_scalar_nan(v)):\n filtered_params[k] = v\n return filtered_params\n+ return filtered_params\n \n \n class _EstimatorPrettyPrinter(pprint.PrettyPrinter):\n" - }, - { - "name": "20240523_aider", - "patch": "diff --git a/sklearn/utils/_pprint.py b/sklearn/utils/_pprint.py\nindex b8686dba5..c1915eda7 100644\n--- a/sklearn/utils/_pprint.py\n+++ b/sklearn/utils/_pprint.py\n@@ -67,6 +67,7 @@ from inspect import signature\n import pprint\n from collections import OrderedDict\n \n+import numpy as np\n from ..base import BaseEstimator\n from .._config import get_config\n from . import is_scalar_nan\n@@ -95,7 +96,8 @@ def _changed_params(estimator):\n init_params = signature(init_func).parameters\n init_params = {name: param.default for name, param in init_params.items()}\n for k, v in params.items():\n- if (v != init_params[k] and\n+ if (isinstance(v, np.ndarray) and isinstance(init_params[k], np.ndarray) and not np.array_equal(v, init_params[k])) or \\\n+ (not isinstance(v, np.ndarray) and v != init_params[k] and\n not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):\n filtered_params[k] = v\n return filtered_params\n" - }, - { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sklearn/utils/_pprint.py\n+++ b/sklearn/utils/_pprint.py\n@@ -65,6 +65,7 @@\n \n from inspect import signature\n import pprint\n+import numpy as np\n from collections import OrderedDict\n \n from ..base import BaseEstimator\n@@ -95,8 +96,7 @@\n init_params = signature(init_func).parameters\n init_params = {name: param.default for name, param in init_params.items()}\n for k, v in params.items():\n- if (v != init_params[k] and\n- not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):\n+ if not (np.array_equal(np.array(init_params[k]), np.array(v)) if isinstance(v, (np.ndarray, list, tuple)) or isinstance(init_params[k], (np.ndarray, list, tuple)) else v == init_params[k]) and not (is_scalar_nan(init_params[k]) and is_scalar_nan(v)):\n filtered_params[k] = v\n return filtered_params\n \n" - }, - { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sklearn/utils/_pprint.py b/sklearn/utils/_pprint.py\nindex b8686dba5..efabaaf13 100644\n--- a/sklearn/utils/_pprint.py\n+++ b/sklearn/utils/_pprint.py\n@@ -65,6 +65,7 @@ BaseEstimator.__repr__ for pretty-printing estimators\"\"\"\n \n from inspect import signature\n import pprint\n+import numpy as np\n from collections import OrderedDict\n \n from ..base import BaseEstimator\n@@ -95,8 +96,12 @@ def _changed_params(estimator):\n init_params = signature(init_func).parameters\n init_params = {name: param.default for name, param in init_params.items()}\n for k, v in params.items():\n- if (v != init_params[k] and\n- not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):\n+ if is_scalar_nan(init_params[k]) and is_scalar_nan(v):\n+ continue\n+ if isinstance(v, np.ndarray):\n+ if (v != init_params[k]).any():\n+ filtered_params[k] = v\n+ elif v != init_params[k]:\n filtered_params[k] = v\n return filtered_params\n \n" - }, - { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/.coveragerc b/.coveragerc\ndeleted file mode 100644\nindex 6d76a5bca..000000000\n--- a/.coveragerc\n+++ /dev/null\n@@ -1,8 +0,0 @@\n-[run]\n-branch = True\n-source = sklearn\n-include = */sklearn/*\n-omit =\n- */sklearn/externals/*\n- */benchmarks/*\n- */setup.py\ndiff --git a/sklearn/utils/_pprint.py b/sklearn/utils/_pprint.py\nindex b8686dba5..2f4ac6a4a 100644\n--- 
a/sklearn/utils/_pprint.py\n+++ b/sklearn/utils/_pprint.py\n@@ -87,6 +87,8 @@ class KeyValTupleParam(KeyValTuple):\n def _changed_params(estimator):\n \"\"\"Return dict (param_name: value) of parameters that were given to\n estimator with non-default values.\"\"\"\n+ \n+ import numpy as np # Import numpy to use array_equal for comparison\n \n params = estimator.get_params(deep=False)\n filtered_params = {}\n@@ -95,7 +97,8 @@ def _changed_params(estimator):\n init_params = signature(init_func).parameters\n init_params = {name: param.default for name, param in init_params.items()}\n for k, v in params.items():\n- if (v != init_params[k] and\n+ # Use numpy.array_equal to compare numpy arrays\n+ if (not np.array_equal(v, init_params[k]) and\n not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):\n filtered_params[k] = v\n return filtered_params\n" - } - ], + "test_file_spans": { + "sympy/integrals/tests/test_intpoly.py": [ + "test_decompose" + ] + }, + "resolved_by": [], "alternative_spans": [] }, { - "instance_id": "pytest-dev__pytest-5103", - "repo": "pytest-dev/pytest", - "base_commit": "10ca84ffc56c2dd2d9dc4bd71b7b898e083500cd", - "problem_statement": "Unroll the iterable for all/any calls to get better reports\nSometime I need to assert some predicate on all of an iterable, and for that the builtin functions `all`/`any` are great - but the failure messages aren't useful at all!\r\nFor example - the same test written in three ways:\r\n\r\n- A generator expression\r\n```sh \r\n def test_all_even():\r\n even_stevens = list(range(1,100,2))\r\n> assert all(is_even(number) for number in even_stevens)\r\nE assert False\r\nE + where False = all(. at 0x101f82ed0>)\r\n```\r\n- A list comprehension\r\n```sh\r\n def test_all_even():\r\n even_stevens = list(range(1,100,2))\r\n> assert all([is_even(number) for number in even_stevens])\r\nE assert False\r\nE + where False = all([False, False, False, False, False, False, ...])\r\n```\r\n- A for loop\r\n```sh\r\n def test_all_even():\r\n even_stevens = list(range(1,100,2))\r\n for number in even_stevens:\r\n> assert is_even(number)\r\nE assert False\r\nE + where False = is_even(1)\r\n\r\ntest_all_any.py:7: AssertionError\r\n```\r\nThe only one that gives a meaningful report is the for loop - but it's way more wordy, and `all` asserts don't translate to a for loop nicely (I'll have to write a `break` or a helper function - yuck)\r\nI propose the assertion re-writer \"unrolls\" the iterator to the third form, and then uses the already existing reports.\r\n\r\n- [x] Include a detailed description of the bug or suggestion\r\n- [x] `pip list` of the virtual environment you are using\r\n```\r\nPackage Version\r\n-------------- -------\r\natomicwrites 1.3.0 \r\nattrs 19.1.0 \r\nmore-itertools 7.0.0 \r\npip 19.0.3 \r\npluggy 0.9.0 \r\npy 1.8.0 \r\npytest 4.4.0 \r\nsetuptools 40.8.0 \r\nsix 1.12.0 \r\n```\r\n- [x] pytest and operating system versions\r\n`platform darwin -- Python 3.7.3, pytest-4.4.0, py-1.8.0, pluggy-0.9.0`\r\n- [x] Minimal example if possible\r\n\n", - "golden_patch": "diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py\n--- a/src/_pytest/assertion/rewrite.py\n+++ b/src/_pytest/assertion/rewrite.py\n@@ -964,6 +964,8 @@ def visit_Call_35(self, call):\n \"\"\"\n visit `ast.Call` nodes on Python3.5 and after\n \"\"\"\n+ if isinstance(call.func, ast.Name) and call.func.id == \"all\":\n+ return self._visit_all(call)\n new_func, func_expl = self.visit(call.func)\n arg_expls = []\n new_args = []\n@@ -987,6 +989,27 @@ 
def visit_Call_35(self, call):\n outer_expl = \"%s\\n{%s = %s\\n}\" % (res_expl, res_expl, expl)\n return res, outer_expl\n \n+ def _visit_all(self, call):\n+ \"\"\"Special rewrite for the builtin all function, see #5062\"\"\"\n+ if not isinstance(call.args[0], (ast.GeneratorExp, ast.ListComp)):\n+ return\n+ gen_exp = call.args[0]\n+ assertion_module = ast.Module(\n+ body=[ast.Assert(test=gen_exp.elt, lineno=1, msg=\"\", col_offset=1)]\n+ )\n+ AssertionRewriter(module_path=None, config=None).run(assertion_module)\n+ for_loop = ast.For(\n+ iter=gen_exp.generators[0].iter,\n+ target=gen_exp.generators[0].target,\n+ body=assertion_module.body,\n+ orelse=[],\n+ )\n+ self.statements.append(for_loop)\n+ return (\n+ ast.Num(n=1),\n+ \"\",\n+ ) # Return an empty expression, all the asserts are in the for_loop\n+\n def visit_Starred(self, starred):\n # From Python 3.5, a Starred node can appear in a function call\n res, expl = self.visit(starred.value)\n@@ -997,6 +1020,8 @@ def visit_Call_legacy(self, call):\n \"\"\"\n visit `ast.Call nodes on 3.4 and below`\n \"\"\"\n+ if isinstance(call.func, ast.Name) and call.func.id == \"all\":\n+ return self._visit_all(call)\n new_func, func_expl = self.visit(call.func)\n arg_expls = []\n new_args = []\n", + "instance_id": "sympy__sympy-13146", + "repo": "sympy/sympy", + "base_commit": "b678d8103e48fdb1af335dbf0080b3d5366f2d17", + "problem_statement": "Exponent doesn't fully simplify\nSay I have code like this:\n\n```\nimport sympy\nfrom sympy import *\nx=Symbol('x')\nexpr1 = S(1)/2*x**2.5\nexpr2 = S(1)*x**(S(5)/2)/2\nres = expr1-expr2\nres= simplify(res.evalf(5))\nprint res\n```\n\nThe output is\n`-0.5*x**2.5 + 0.5*x**2.5`\nHow do I simplify it to 0?\n\n", + "golden_patch": "diff --git a/sympy/core/operations.py b/sympy/core/operations.py\n--- a/sympy/core/operations.py\n+++ b/sympy/core/operations.py\n@@ -332,9 +332,7 @@ def _eval_evalf(self, prec):\n args.append(a)\n else:\n args.append(newa)\n- if not _aresame(tuple(args), tail_args):\n- tail = self.func(*args)\n- return self.func(x, tail)\n+ return self.func(x, *args)\n \n # this is the same as above, but there were no pure-number args to\n # deal with\n@@ -345,9 +343,7 @@ def _eval_evalf(self, prec):\n args.append(a)\n else:\n args.append(newa)\n- if not _aresame(tuple(args), self.args):\n- return self.func(*args)\n- return self\n+ return self.func(*args)\n \n @classmethod\n def make_args(cls, expr):\n", + "test_patch": "diff --git a/sympy/core/tests/test_evalf.py b/sympy/core/tests/test_evalf.py\n--- a/sympy/core/tests/test_evalf.py\n+++ b/sympy/core/tests/test_evalf.py\n@@ -227,6 +227,9 @@ def test_evalf_bugs():\n assert ((oo*I).n() == S.Infinity*I)\n assert ((oo+oo*I).n() == S.Infinity + S.Infinity*I)\n \n+ #issue 11518\n+ assert NS(2*x**2.5, 5) == '2.0000*x**2.5000'\n+\n \n def test_evalf_integer_parts():\n a = floor(log(8)/log(2) - exp(-1000), evaluate=False)\n", + "fail_to_pass": "[\"test_evalf_bugs\"]", + "pass_to_pass": "[\"test_evalf_helpers\", \"test_evalf_basic\", \"test_cancellation\", \"test_evalf_powers\", \"test_evalf_rump\", \"test_evalf_complex\", \"test_evalf_complex_powers\", \"test_evalf_exponentiation\", \"test_evalf_complex_cancellation\", \"test_evalf_trig_zero_detection\", \"test_evalf_sum\", \"test_evalf_divergent_series\", \"test_evalf_product\", \"test_evalf_py_methods\", \"test_evalf_power_subs_bugs\", \"test_evalf_arguments\", \"test_implemented_function_evalf\", \"test_evaluate_false\", \"test_evalf_relational\", \"test_issue_5486\", \"test_issue_5486_bug\", \"test_bugs\", 
\"test_subs\", \"test_old_docstring\", \"test_scaled_zero\", \"test_chop_value\", \"test_infinities\", \"test_to_mpmath\", \"test_issue_4945\", \"test_evalf_integral\", \"test_issue_8821_highprec_from_str\", \"test_issue_8853\", \"test_issue_9326\", \"test_issue_10323\"]", "expected_spans": { - "src/_pytest/assertion/rewrite.py": [ - "AssertionRewriter.visit_Call_35", - "AssertionRewriter.visit_Call_legacy" + "sympy/core/operations.py": [ + "AssocOp._eval_evalf" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_evalf.py": [ + "test_evalf_bugs" ] }, "resolved_by": [], "alternative_spans": [] }, { - "instance_id": "django__django-11283", - "repo": "django/django", - "base_commit": "08a4ee06510ae45562c228eefbdcaac84bd38c7a", - "problem_statement": "Migration auth.0011_update_proxy_permissions fails for models recreated as a proxy.\nDescription\n\t \n\t\t(last modified by Mariusz Felisiak)\n\t \nI am trying to update my project to Django 2.2. When I launch python manage.py migrate, I get this error message when migration auth.0011_update_proxy_permissions is applying (full stacktrace is available \u200bhere):\ndjango.db.utils.IntegrityError: duplicate key value violates unique constraint \"idx_18141_auth_permission_content_type_id_01ab375a_uniq\" DETAIL: Key (co.ntent_type_id, codename)=(12, add_agency) already exists.\nIt looks like the migration is trying to re-create already existing entries in the auth_permission table. At first I though it cloud because we recently renamed a model. But after digging and deleting the entries associated with the renamed model from our database in the auth_permission table, the problem still occurs with other proxy models.\nI tried to update directly from 2.0.13 and 2.1.8. The issues appeared each time. I also deleted my venv and recreated it without an effect.\nI searched for a ticket about this on the bug tracker but found nothing. 
I also posted this on \u200bdjango-users and was asked to report this here.\n", - "golden_patch": "diff --git a/django/contrib/auth/migrations/0011_update_proxy_permissions.py b/django/contrib/auth/migrations/0011_update_proxy_permissions.py\n--- a/django/contrib/auth/migrations/0011_update_proxy_permissions.py\n+++ b/django/contrib/auth/migrations/0011_update_proxy_permissions.py\n@@ -1,5 +1,18 @@\n-from django.db import migrations\n+import sys\n+\n+from django.core.management.color import color_style\n+from django.db import migrations, transaction\n from django.db.models import Q\n+from django.db.utils import IntegrityError\n+\n+WARNING = \"\"\"\n+ A problem arose migrating proxy model permissions for {old} to {new}.\n+\n+ Permission(s) for {new} already existed.\n+ Codenames Q: {query}\n+\n+ Ensure to audit ALL permissions for {old} and {new}.\n+\"\"\"\n \n \n def update_proxy_model_permissions(apps, schema_editor, reverse=False):\n@@ -7,6 +20,7 @@ def update_proxy_model_permissions(apps, schema_editor, reverse=False):\n Update the content_type of proxy model permissions to use the ContentType\n of the proxy model.\n \"\"\"\n+ style = color_style()\n Permission = apps.get_model('auth', 'Permission')\n ContentType = apps.get_model('contenttypes', 'ContentType')\n for Model in apps.get_models():\n@@ -24,10 +38,16 @@ def update_proxy_model_permissions(apps, schema_editor, reverse=False):\n proxy_content_type = ContentType.objects.get_for_model(Model, for_concrete_model=False)\n old_content_type = proxy_content_type if reverse else concrete_content_type\n new_content_type = concrete_content_type if reverse else proxy_content_type\n- Permission.objects.filter(\n- permissions_query,\n- content_type=old_content_type,\n- ).update(content_type=new_content_type)\n+ try:\n+ with transaction.atomic():\n+ Permission.objects.filter(\n+ permissions_query,\n+ content_type=old_content_type,\n+ ).update(content_type=new_content_type)\n+ except IntegrityError:\n+ old = '{}_{}'.format(old_content_type.app_label, old_content_type.model)\n+ new = '{}_{}'.format(new_content_type.app_label, new_content_type.model)\n+ sys.stdout.write(style.WARNING(WARNING.format(old=old, new=new, query=permissions_query)))\n \n \n def revert_proxy_model_permissions(apps, schema_editor):\n", + "instance_id": "sympy__sympy-13177", + "repo": "sympy/sympy", + "base_commit": "662cfb818e865f580e18b59efbb3540c34232beb", + "problem_statement": "Mod(x**2, x) is not (always) 0\nWhen the base is not an integer, `x**2 % x` is not 0. 
The base is not tested to be an integer in Mod's eval logic:\r\n\r\n```\r\nif (p == q or p == -q or\r\n p.is_Pow and p.exp.is_Integer and p.base == q or\r\n p.is_integer and q == 1):\r\n return S.Zero\r\n```\r\n\r\nso\r\n\r\n```\r\n>>> Mod(x**2, x)\r\n0\r\n```\r\nbut\r\n```\r\n>>> x = S(1.5)\r\n>>> Mod(x**2, x)\r\n0.75\r\n```\n", + "golden_patch": "diff --git a/sympy/core/mod.py b/sympy/core/mod.py\n--- a/sympy/core/mod.py\n+++ b/sympy/core/mod.py\n@@ -39,7 +39,8 @@ def doit(p, q):\n if p.is_infinite or q.is_infinite or p is nan or q is nan:\n return nan\n if (p == q or p == -q or\n- p.is_Pow and p.exp.is_Integer and p.base == q or\n+ p.is_Pow and p.exp.is_integer and p.base == q and q.is_integer\n+ and p.exp.is_positive or\n p.is_integer and q == 1):\n return S.Zero\n \n", + "test_patch": "diff --git a/sympy/core/tests/test_numbers.py b/sympy/core/tests/test_numbers.py\n--- a/sympy/core/tests/test_numbers.py\n+++ b/sympy/core/tests/test_numbers.py\n@@ -8,6 +8,7 @@\n from sympy.core.logic import fuzzy_not\n from sympy.core.numbers import (igcd, ilcm, igcdex, seterr, _intcache,\n igcd2, igcd_lehmer, mpf_norm, comp, mod_inverse)\n+from sympy.core.mod import Mod\n from sympy.utilities.decorator import conserve_mpmath_dps\n from sympy.utilities.iterables import permutations\n from sympy.utilities.pytest import XFAIL, raises\n@@ -121,6 +122,20 @@ def test_mod():\n assert Integer(10) % 4 == Integer(2)\n assert 15 % Integer(4) == Integer(3)\n \n+ h = Symbol('h')\n+ m = h ** 2 % h\n+ k = h ** -2 % h\n+ l = Symbol('l', integer=True)\n+ p = Symbol('p', integer=True, positive=True)\n+ q = Symbol('q', integer=True, negative=True)\n+\n+ assert m == h * (h % 1)\n+ assert k == Mod(h ** -2, h, evaluate=False)\n+ assert Mod(l ** p, l) == 0\n+ assert Mod(l ** 2, l) == 0\n+ assert (l ** q % l) == Mod(l ** q, l, evaluate=False)\n+ assert (l ** -2 % l) == Mod(l ** -2, l, evaluate=False)\n+\n \n def test_divmod():\n assert divmod(S(12), S(8)) == Tuple(1, 4)\n", + "fail_to_pass": "[\"test_mod\", \"test_mod_inverse\"]", + "pass_to_pass": "[\"test_integers_cache\", \"test_seterr\", \"test_divmod\", \"test_igcd\", \"test_igcd_lehmer\", \"test_igcd2\", \"test_ilcm\", \"test_igcdex\", \"test_Integer_new\", \"test_Rational_new\", \"test_Number_new\", \"test_Rational_cmp\", \"test_Float\", \"test_float_mpf\", \"test_Float_RealElement\", \"test_Float_default_to_highprec_from_str\", \"test_Float_eval\", \"test_Float_issue_2107\", \"test_Infinity\", \"test_Infinity_2\", \"test_Mul_Infinity_Zero\", \"test_Div_By_Zero\", \"test_Infinity_inequations\", \"test_NaN\", \"test_special_numbers\", \"test_powers\", \"test_integer_nthroot_overflow\", \"test_isqrt\", \"test_powers_Rational\", \"test_powers_Float\", \"test_abs1\", \"test_accept_int\", \"test_dont_accept_str\", \"test_int\", \"test_long\", \"test_real_bug\", \"test_bug_sqrt\", \"test_pi_Pi\", \"test_no_len\", \"test_issue_3321\", \"test_issue_3692\", \"test_issue_3423\", \"test_issue_3449\", \"test_Integer_factors\", \"test_Rational_factors\", \"test_issue_4107\", \"test_IntegerInteger\", \"test_Rational_gcd_lcm_cofactors\", \"test_Float_gcd_lcm_cofactors\", \"test_issue_4611\", \"test_conversion_to_mpmath\", \"test_relational\", \"test_Integer_as_index\", \"test_Rational_int\", \"test_zoo\", \"test_issue_4122\", \"test_GoldenRatio_expand\", \"test_as_content_primitive\", \"test_hashing_sympy_integers\", \"test_issue_4172\", \"test_Catalan_EulerGamma_prec\", \"test_Float_eq\", \"test_int_NumberSymbols\", \"test_issue_6640\", \"test_issue_6349\", \"test_mpf_norm\", 
\"test_latex\", \"test_issue_7742\", \"test_Float_idempotence\", \"test_comp\", \"test_issue_9491\", \"test_issue_10063\", \"test_issue_10020\", \"test_invert_numbers\", \"test_golden_ratio_rewrite_as_sqrt\", \"test_comparisons_with_unknown_type\"]", "expected_spans": { - "django/contrib/auth/migrations/0011_update_proxy_permissions.py": [ + "sympy/core/mod.py": [ + "Mod.eval" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_numbers.py": [ "imports", - "update_proxy_model_permissions" + "test_mod" ] }, "resolved_by": [], "alternative_spans": [] }, { - "instance_id": "scikit-learn__scikit-learn-13779", - "repo": "scikit-learn/scikit-learn", - "base_commit": "b34751b7ed02b2cfcc36037fb729d4360480a299", - "problem_statement": "Voting estimator will fail at fit if weights are passed and an estimator is None\nBecause we don't check for an estimator to be `None` in `sample_weight` support, `fit` is failing`.\r\n\r\n```python\r\n X, y = load_iris(return_X_y=True)\r\n voter = VotingClassifier(\r\n estimators=[('lr', LogisticRegression()),\r\n ('rf', RandomForestClassifier())]\r\n )\r\n voter.fit(X, y, sample_weight=np.ones(y.shape))\r\n voter.set_params(lr=None)\r\n voter.fit(X, y, sample_weight=np.ones(y.shape))\r\n```\r\n\r\n```\r\nAttributeError: 'NoneType' object has no attribute 'fit'\r\n```\n", - "golden_patch": "diff --git a/sklearn/ensemble/voting.py b/sklearn/ensemble/voting.py\n--- a/sklearn/ensemble/voting.py\n+++ b/sklearn/ensemble/voting.py\n@@ -78,6 +78,8 @@ def fit(self, X, y, sample_weight=None):\n \n if sample_weight is not None:\n for name, step in self.estimators:\n+ if step is None:\n+ continue\n if not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' % name)\n", + "instance_id": "sympy__sympy-13437", + "repo": "sympy/sympy", + "base_commit": "674afc619d7f5c519b6a5393a8b0532a131e57e0", + "problem_statement": "bell(n).limit(n, oo) should be oo rather than bell(oo)\n`bell(n).limit(n,oo)` should take the value infinity, but the current output is `bell(oo)`. As the Bell numbers represent the number of partitions of a set, it seems natural that `bell(oo)` should be able to be evaluated rather than be returned unevaluated. 
This issue is also in line with the recent fixes to the corresponding limit for the Fibonacci numbers and Lucas numbers.\n\n```\nfrom sympy import *\nn = symbols('n')\nbell(n).limit(n,oo)\n\nOutput:\nbell(oo)\n```\n\nI'm new to Sympy, so I'd appreciate the opportunity to fix this bug myself if that's alright.\n\n", + "golden_patch": "diff --git a/sympy/functions/combinatorial/numbers.py b/sympy/functions/combinatorial/numbers.py\n--- a/sympy/functions/combinatorial/numbers.py\n+++ b/sympy/functions/combinatorial/numbers.py\n@@ -424,6 +424,15 @@ def _bell_incomplete_poly(n, k, symbols):\n \n @classmethod\n def eval(cls, n, k_sym=None, symbols=None):\n+ if n is S.Infinity:\n+ if k_sym is None:\n+ return S.Infinity\n+ else:\n+ raise ValueError(\"Bell polynomial is not defined\")\n+\n+ if n.is_negative or n.is_integer is False:\n+ raise ValueError(\"a non-negative integer expected\")\n+\n if n.is_Integer and n.is_nonnegative:\n if k_sym is None:\n return Integer(cls._bell(int(n)))\n", + "test_patch": "diff --git a/sympy/functions/combinatorial/tests/test_comb_numbers.py b/sympy/functions/combinatorial/tests/test_comb_numbers.py\n--- a/sympy/functions/combinatorial/tests/test_comb_numbers.py\n+++ b/sympy/functions/combinatorial/tests/test_comb_numbers.py\n@@ -73,6 +73,11 @@ def test_bell():\n assert bell(1, x) == x\n assert bell(2, x) == x**2 + x\n assert bell(5, x) == x**5 + 10*x**4 + 25*x**3 + 15*x**2 + x\n+ assert bell(oo) == S.Infinity\n+ raises(ValueError, lambda: bell(oo, x))\n+\n+ raises(ValueError, lambda: bell(-1))\n+ raises(ValueError, lambda: bell(S(1)/2))\n \n X = symbols('x:6')\n # X = (x0, x1, .. x5)\n@@ -99,9 +104,9 @@ def test_bell():\n for i in [0, 2, 3, 7, 13, 42, 55]:\n assert bell(i).evalf() == bell(n).rewrite(Sum).evalf(subs={n: i})\n \n- # For negative numbers, the formula does not hold\n- m = Symbol('m', integer=True)\n- assert bell(-1).evalf() == bell(m).rewrite(Sum).evalf(subs={m: -1})\n+ # issue 9184\n+ n = Dummy('n')\n+ assert bell(n).limit(n, S.Infinity) == S.Infinity\n \n \n def test_harmonic():\n", + "fail_to_pass": "[\"test_bell\"]", + "pass_to_pass": "[\"test_bernoulli\", \"test_fibonacci\", \"test_harmonic\", \"test_harmonic_rational\", \"test_harmonic_evalf\", \"test_harmonic_rewrite_polygamma\", \"test_harmonic_rewrite_sum\", \"test_euler\", \"test_euler_odd\", \"test_euler_polynomials\", \"test_euler_polynomial_rewrite\", \"test_catalan\", \"test_genocchi\", \"test_nC_nP_nT\", \"test_issue_8496\"]", "expected_spans": { - "sklearn/ensemble/voting.py": [ - "_BaseVoting.fit" + "sympy/functions/combinatorial/numbers.py": [ + "bell._bell_incomplete_poly" ] }, - "resolved_by": [ - { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/sklearn/ensemble/voting.py b/sklearn/ensemble/voting.py\nindex 7afa7180c..9314ab7b2 100644\n--- a/sklearn/ensemble/voting.py\n+++ b/sklearn/ensemble/voting.py\n@@ -12,7 +12,6 @@ This module contains:\n # Mohamed Ali Jamaoui \n #\n # License: BSD 3 clause\n-\n import numpy as np\n from abc import abstractmethod\n \n@@ -78,7 +77,7 @@ class _BaseVoting(_BaseComposition, TransformerMixin):\n \n if sample_weight is not None:\n for name, step in self.estimators:\n- if not has_fit_parameter(step, 'sample_weight'):\n+ if step is not None and not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' 
% name)\n \n" - }, - { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sklearn/ensemble/voting.py\n+++ b/sklearn/ensemble/voting.py\n@@ -75,13 +75,13 @@\n raise ValueError('Number of `estimators` and weights must be equal'\n '; got %d weights, %d estimators'\n % (len(self.weights), len(self.estimators)))\n \n if sample_weight is not None:\n for name, step in self.estimators:\n- if not has_fit_parameter(step, 'sample_weight'):\n+ if step is not None and not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' % name)\n \n names, clfs = zip(*self.estimators)\n self._validate_names(names)\n \n" - }, - { - "name": "20240523_aider", - "patch": "diff --git a/sklearn/ensemble/voting.py b/sklearn/ensemble/voting.py\nindex 7afa7180c..13accdcec 100644\n--- a/sklearn/ensemble/voting.py\n+++ b/sklearn/ensemble/voting.py\n@@ -78,7 +78,7 @@ class _BaseVoting(_BaseComposition, TransformerMixin):\n \n if sample_weight is not None:\n for name, step in self.estimators:\n- if not has_fit_parameter(step, 'sample_weight'):\n+ if step is not None and not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' % name)\n \n" - }, - { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sklearn/ensemble/voting.py b/sklearn/ensemble/voting.py\nindex 7afa7180c..a2ad157be 100644\n--- a/sklearn/ensemble/voting.py\n+++ b/sklearn/ensemble/voting.py\n@@ -29,10 +29,11 @@ from ..utils import Bunch\n \n def _parallel_fit_estimator(estimator, X, y, sample_weight=None):\n \"\"\"Private function used to fit an estimator within a job.\"\"\"\n- if sample_weight is not None:\n- estimator.fit(X, y, sample_weight=sample_weight)\n- else:\n- estimator.fit(X, y)\n+ if estimator is not None:\n+ if sample_weight is not None:\n+ estimator.fit(X, y, sample_weight=sample_weight)\n+ else:\n+ estimator.fit(X, y)\n return estimator\n \n \n@@ -78,7 +79,7 @@ class _BaseVoting(_BaseComposition, TransformerMixin):\n \n if sample_weight is not None:\n for name, step in self.estimators:\n- if not has_fit_parameter(step, 'sample_weight'):\n+ if step is not None and not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' % name)\n \n" - }, - { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sklearn/ensemble/voting.py b/sklearn/ensemble/voting.py\nindex 7afa7180c..ca94ab1f0 100644\n--- a/sklearn/ensemble/voting.py\n+++ b/sklearn/ensemble/voting.py\n@@ -29,10 +29,11 @@ from ..utils import Bunch\n \n def _parallel_fit_estimator(estimator, X, y, sample_weight=None):\n \"\"\"Private function used to fit an estimator within a job.\"\"\"\n- if sample_weight is not None:\n- estimator.fit(X, y, sample_weight=sample_weight)\n- else:\n- estimator.fit(X, y)\n+ if estimator is not None:\n+ if sample_weight is not None:\n+ estimator.fit(X, y, sample_weight=sample_weight)\n+ else:\n+ estimator.fit(X, y)\n return estimator\n \n \n@@ -77,7 +78,8 @@ class _BaseVoting(_BaseComposition, TransformerMixin):\n % (len(self.weights), len(self.estimators)))\n \n if sample_weight is not None:\n- for name, step in self.estimators:\n+ valid_estimators = [(name, est) for name, est in self.estimators if est is not None]\n+ for name, step in valid_estimators:\n if not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' 
% name)\n" - }, - { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sklearn/ensemble/voting.py b/sklearn/ensemble/voting.py\nindex 7afa7180c..df9c382bf 100644\n--- a/sklearn/ensemble/voting.py\n+++ b/sklearn/ensemble/voting.py\n@@ -78,6 +78,9 @@ class _BaseVoting(_BaseComposition, TransformerMixin):\n \n if sample_weight is not None:\n for name, step in self.estimators:\n+ # Skip fitting if the estimator is None\n+ if step is None:\n+ continue\n if not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' % name)\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "pytest-dev__pytest-5221", - "repo": "pytest-dev/pytest", - "base_commit": "4a2fdce62b73944030cff9b3e52862868ca9584d", - "problem_statement": "Display fixture scope with `pytest --fixtures`\nIt would be useful to show fixture scopes with `pytest --fixtures`; currently the only way to learn the scope of a fixture is look at the docs (when that is documented) or at the source code.\n", - "golden_patch": "diff --git a/src/_pytest/python.py b/src/_pytest/python.py\n--- a/src/_pytest/python.py\n+++ b/src/_pytest/python.py\n@@ -1342,17 +1342,19 @@ def _showfixtures_main(config, session):\n currentmodule = module\n if verbose <= 0 and argname[0] == \"_\":\n continue\n+ tw.write(argname, green=True)\n+ if fixturedef.scope != \"function\":\n+ tw.write(\" [%s scope]\" % fixturedef.scope, cyan=True)\n if verbose > 0:\n- funcargspec = \"%s -- %s\" % (argname, bestrel)\n- else:\n- funcargspec = argname\n- tw.line(funcargspec, green=True)\n+ tw.write(\" -- %s\" % bestrel, yellow=True)\n+ tw.write(\"\\n\")\n loc = getlocation(fixturedef.func, curdir)\n doc = fixturedef.func.__doc__ or \"\"\n if doc:\n write_docstring(tw, doc)\n else:\n tw.line(\" %s: no docstring available\" % (loc,), red=True)\n+ tw.line()\n \n \n def write_docstring(tw, doc, indent=\" \"):\n", - "expected_spans": { - "src/_pytest/python.py": [ - "_showfixtures_main" + "test_file_spans": { + "sympy/functions/combinatorial/tests/test_comb_numbers.py": [ + "test_fibonacci", + "test_bell" ] }, "resolved_by": [], "alternative_spans": [] }, { - "instance_id": "pytest-dev__pytest-5227", - "repo": "pytest-dev/pytest", - "base_commit": "2051e30b9b596e944524ccb787ed20f9f5be93e3", - "problem_statement": "Improve default logging format\nCurrently it is:\r\n\r\n> DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\r\n\r\nI think `name` (module name) would be very useful here, instead of just the base filename.\r\n\r\n(It might also be good to have the relative path there (maybe at the end), but it is usually still very long (but e.g. 
`$VIRTUAL_ENV` could be substituted therein))\r\n\r\nCurrently it would look like this:\r\n```\r\nutils.py 114 DEBUG (0.000) SELECT \"app_url\".\"id\", \"app_url\".\"created\", \"app_url\".\"url\" FROM \"app_url\" WHERE \"app_url\".\"id\" = 2; args=(2,)\r\nmultipart.py 604 DEBUG Calling on_field_start with no data\r\n```\r\n\r\n\r\nUsing `DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"` instead:\r\n\r\n```\r\nDEBUG django.db.backends:utils.py:114 (0.000) SELECT \"app_url\".\"id\", \"app_url\".\"created\", \"app_url\".\"url\" FROM \"app_url\" WHERE \"app_url\".\"id\" = 2; args=(2,)\r\nDEBUG multipart.multipart:multipart.py:604 Calling on_field_start with no data\r\n```\n", - "golden_patch": "diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n", + "instance_id": "sympy__sympy-13471", + "repo": "sympy/sympy", + "base_commit": "3546ac7ed78e1780c1a76929864bb33330055740", + "problem_statement": "Python 2->3 pickle fails with float-containing expressions\nDumping a pickled sympy expression containing a float in Python 2, then loading it in Python 3 generates an error.\r\n\r\nHere is a minimum working example, verified with sympy git commit 3546ac7 (master at time of writing), Python 2.7 and Python 3.6:\r\n\r\n```python\r\npython2 -c 'import pickle; import sympy; x = sympy.symbols(\"x\"); print pickle.dumps(x + 1.0, 2)' | python3 -c 'import pickle; import sys; print(pickle.loads(sys.stdin.buffer.read()))'\r\n```\r\n\r\nand the result:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/Users/alex/git/VU/sympy/sympy/core/numbers.py\", line 1045, in __new__\r\n num[1] = long(num[1], 16)\r\nValueError: invalid literal for int() with base 16: '1L'\r\n```\n", + "golden_patch": "diff --git a/sympy/core/numbers.py b/sympy/core/numbers.py\n--- a/sympy/core/numbers.py\n+++ b/sympy/core/numbers.py\n@@ -1042,6 +1042,11 @@ def __new__(cls, num, dps=None, prec=None, precision=None):\n # it's a hexadecimal (coming from a pickled object)\n # assume that it is in standard form\n num = list(num)\n+ # If we're loading an object pickled in Python 2 into\n+ # Python 3, we may need to strip a tailing 'L' because\n+ # of a shim for int on Python 3, see issue #13470.\n+ if num[1].endswith('L'):\n+ num[1] = num[1][:-1]\n num[1] = long(num[1], 16)\n _mpf_ = tuple(num)\n else:\n", + "test_patch": "diff --git a/sympy/core/tests/test_numbers.py b/sympy/core/tests/test_numbers.py\n--- a/sympy/core/tests/test_numbers.py\n+++ b/sympy/core/tests/test_numbers.py\n@@ -582,6 +582,12 @@ def test_Float_issue_2107():\n assert S.Zero + b + (-b) == 0\n \n \n+def test_Float_from_tuple():\n+ a = Float((0, '1L', 0, 1))\n+ b = Float((0, '1', 0, 1))\n+ assert a == b\n+\n+\n def test_Infinity():\n assert oo != 1\n assert 1*oo == oo\n", + "fail_to_pass": "[\"test_Float_from_tuple\"]", + "pass_to_pass": "[\"test_integers_cache\", \"test_seterr\", \"test_mod\", \"test_divmod\", \"test_igcd\", \"test_igcd_lehmer\", \"test_igcd2\", \"test_ilcm\", \"test_igcdex\", \"test_Integer_new\", \"test_Rational_new\", \"test_Number_new\", \"test_Rational_cmp\", \"test_Float\", 
\"test_float_mpf\", \"test_Float_RealElement\", \"test_Float_default_to_highprec_from_str\", \"test_Float_eval\", \"test_Float_issue_2107\", \"test_Infinity\", \"test_Infinity_2\", \"test_Mul_Infinity_Zero\", \"test_Div_By_Zero\", \"test_Infinity_inequations\", \"test_NaN\", \"test_special_numbers\", \"test_powers\", \"test_integer_nthroot_overflow\", \"test_isqrt\", \"test_powers_Integer\", \"test_powers_Rational\", \"test_powers_Float\", \"test_abs1\", \"test_accept_int\", \"test_dont_accept_str\", \"test_int\", \"test_long\", \"test_real_bug\", \"test_bug_sqrt\", \"test_pi_Pi\", \"test_no_len\", \"test_issue_3321\", \"test_issue_3692\", \"test_issue_3423\", \"test_issue_3449\", \"test_Integer_factors\", \"test_Rational_factors\", \"test_issue_4107\", \"test_IntegerInteger\", \"test_Rational_gcd_lcm_cofactors\", \"test_Float_gcd_lcm_cofactors\", \"test_issue_4611\", \"test_conversion_to_mpmath\", \"test_relational\", \"test_Integer_as_index\", \"test_Rational_int\", \"test_zoo\", \"test_issue_4122\", \"test_GoldenRatio_expand\", \"test_as_content_primitive\", \"test_hashing_sympy_integers\", \"test_issue_4172\", \"test_Catalan_EulerGamma_prec\", \"test_Float_eq\", \"test_int_NumberSymbols\", \"test_issue_6640\", \"test_issue_6349\", \"test_mpf_norm\", \"test_latex\", \"test_issue_7742\", \"test_simplify_AlgebraicNumber\", \"test_Float_idempotence\", \"test_comp\", \"test_issue_9491\", \"test_issue_10063\", \"test_issue_10020\", \"test_invert_numbers\", \"test_mod_inverse\", \"test_golden_ratio_rewrite_as_sqrt\", \"test_comparisons_with_unknown_type\", \"test_NumberSymbol_comparison\", \"test_Integer_precision\"]", "expected_spans": { - "src/_pytest/logging.py": [ - "impl" + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_numbers.py": [ + "test_Infinity" ] }, "resolved_by": [ { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\nindex 757cb2797..08670d2b2 100644\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@ from _pytest.compat import dummy_context_manager\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n" + "name": "20240523_aider", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + } }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\nindex 757cb2797..08670d2b2 100644\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@ from _pytest.compat import dummy_context_manager\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \ndiff --git a/testing/logging/test_formatter.py b/testing/logging/test_formatter.py\nindex abceaf857..36a07754f 100644\n--- a/testing/logging/test_formatter.py\n+++ b/testing/logging/test_formatter.py\n@@ -6,7 +6,7 @@ from _pytest.logging import ColoredLevelFormatter\n \n \n def test_coloredlogformatter():\n- logfmt = 
\"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+ logfmt = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n \n record = logging.LogRecord(\n name=\"dummy\",\n@@ -27,10 +27,10 @@ def test_coloredlogformatter():\n formatter = ColoredLevelFormatter(tw, logfmt)\n output = formatter.format(record)\n assert output == (\n- \"dummypath 10 \\x1b[32mINFO \\x1b[0m Test Message\"\n+ \"dummy:dummypath 10 \\x1b[32mINFO \\x1b[0m Test Message\"\n )\n \n tw.hasmarkup = False\n formatter = ColoredLevelFormatter(tw, logfmt)\n output = formatter.format(record)\n- assert output == (\"dummypath 10 INFO Test Message\")\n+ assert output == (\"dummy:dummypath:10 INFO Test Message\")\n" + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + } }, { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -12,13 +12,13 @@\n \n import pytest\n from _pytest.compat import dummy_context_manager\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n class ColoredLevelFormatter(logging.Formatter):\n \"\"\"\n Colorize the %(levelname)..s part of the log format passed to __init__.\n" + "name": "20240828_autose_mixed", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\nindex 757cb2797..08670d2b2 100644\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@ from _pytest.compat import dummy_context_manager\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n" + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n" + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\nindex 757cb2797..08670d2b2 100644\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@ from _pytest.compat import dummy_context_manager\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT 
= \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n" + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + } }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\nindex 757cb2797..08670d2b2 100644\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@ from _pytest.compat import dummy_context_manager\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n" + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/core/numbers.py": [ + "imports", + "comp", + "mpf_norm", + "impl:5", + "seterr", + "_as_integer_ratio", + "_decimal_to_Rational_prec", + "_literal_float", + "impl:7", + "igcd", + "impl:9", + "igcd_lehmer", + "ilcm", + "igcdex", + "mod_inverse", + "Number", + "Number.__new__", + "Number.__divmod__", + "Number._as_mpf_val", + "Number.floor", + "Number.ceiling", + "Number._eval_conjugate", + "Number._eval_order", + "Number._eval_subs", + "Number:13", + "Number.__eq__", + "Number.__ne__", + "Number.__lt__", + "Number.__le__", + "Number.__gt__", + "Number.__ge__", + "Number.as_coeff_mul", + "Number.as_coeff_add", + "Float", + "Float.__new__", + "Float._new", + "Float.__getnewargs__", + "Float.__getstate__", + "Float._hashable_content", + "Float.floor", + "Float.ceiling", + "Float.__nonzero__", + "Float:17", + "Float.__mod__", + "Float._eval_power", + "Float.__int__", + "Float:19", + "Float.__eq__", + "Float.__gt__", + "Float.__ge__", + "Float.__lt__", + "Float.__le__", + "Float.__hash__", + "Float.epsilon_eq", + "Float.__format__", + "impl:13", + "Rational", + "Rational.__new__", + "Rational.limit_denominator", + "Rational.__getnewargs__", + "Rational._hashable_content", + "Rational.__add__", + "Rational.__sub__", + "Rational.__rsub__", + "Rational.__mul__", + "Rational.__div__", + "Rational.__rdiv__", + "Rational.__mod__", + "Rational._eval_power", + "Rational.__int__", + "Rational.__eq__", + "Rational.__gt__", + "Rational.__ge__", + "Rational.__lt__", + "Rational.__le__", + "Rational.__hash__", + "Rational.factors", + "Rational.gcd", + "Rational.lcm", + "Rational._sage_", + "Rational.as_content_primitive", + "Rational.as_coeff_Add", + "impl:17", + "_intcache_printinfo", + "int_trace", + "Integer", + "Integer._mpmath_", + "Integer.__new__", + "Integer.__getnewargs__", + "Integer.__int__", + "Integer.__divmod__", + "Integer.__rdivmod__", + "Integer.__add__", + "Integer.__radd__", + "Integer.__sub__", + "Integer.__rsub__", + "Integer.__mul__", + "Integer.__rmul__", + "Integer.__eq__", + "Integer.__gt__", + "Integer.__lt__", + "Integer.__ge__", + "Integer.__le__", + "Integer.__index__", + "Integer._eval_power", + "Integer._eval_is_prime", + "Integer.__rfloordiv__", + "impl:23", + "AlgebraicNumber", + "AlgebraicNumber.__new__", + "AlgebraicNumber.as_poly", + "AlgebraicNumber.coeffs", + "AlgebraicNumber.to_algebraic_integer", + "AlgebraicNumber._eval_simplify", + "Zero", + "Zero._eval_power", + "Zero._eval_order", + "Zero.__nonzero__", + "Zero:17", + "Zero.as_coeff_Mul", + "One", + "One.factors", + "NegativeOne", + 
"NegativeOne._eval_power", + "Half", + "Infinity", + "Infinity.__new__", + "Infinity._latex", + "Infinity.__mul__", + "Infinity:17", + "Infinity.__div__", + "Infinity._eval_power", + "Infinity.__lt__", + "Infinity.__le__", + "Infinity.__gt__", + "Infinity.__ge__", + "Infinity.__mod__", + "impl:25", + "NegativeInfinity", + "NegativeInfinity.__new__", + "NegativeInfinity._latex", + "NegativeInfinity.__mul__", + "NegativeInfinity:15", + "NegativeInfinity.__div__", + "NegativeInfinity._eval_power", + "NegativeInfinity._as_mpf_val", + "NegativeInfinity._sage_", + "NegativeInfinity.__lt__", + "NegativeInfinity.__le__", + "NegativeInfinity.__gt__", + "NegativeInfinity.__ge__", + "NegativeInfinity.__mod__", + "NaN", + "NaN.__new__", + "NaN._latex", + "NaN.__div__", + "NaN.__eq__", + "NaN.__ne__", + "NaN._eval_Eq", + "NaN:33", + "impl:27", + "ComplexInfinity", + "ComplexInfinity.__new__", + "ComplexInfinity._latex", + "ComplexInfinity._eval_power", + "NumberSymbol", + "NumberSymbol.__eq__", + "NumberSymbol.__lt__", + "NumberSymbol.__le__", + "NumberSymbol.__gt__", + "NumberSymbol.__ge__", + "NumberSymbol.__int__", + "Exp1", + "Exp1._latex", + "Exp1.approximation_interval", + "Exp1._eval_rewrite_as_sin", + "Exp1._eval_rewrite_as_cos", + "Exp1._sage_", + "impl:31", + "Pi", + "Pi._latex", + "Pi.approximation_interval", + "Pi._sage_", + "impl:33", + "GoldenRatio", + "GoldenRatio._latex", + "GoldenRatio.__int__", + "GoldenRatio._as_mpf_val", + "GoldenRatio._eval_expand_func", + "GoldenRatio.approximation_interval", + "GoldenRatio._sage_", + "GoldenRatio:19", + "EulerGamma", + "EulerGamma._latex", + "EulerGamma.__int__", + "EulerGamma._as_mpf_val", + "EulerGamma.approximation_interval", + "Catalan", + "Catalan.__int__", + "Catalan._as_mpf_val", + "Catalan.approximation_interval", + "ImaginaryUnit", + "ImaginaryUnit._latex", + "ImaginaryUnit._eval_power", + "ImaginaryUnit._mpc_", + "impl:37", + "impl:47", + "sympify_complex", + "impl:48" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "imports", + "comp", + "mpf_norm", + "impl:5", + "seterr", + "_as_integer_ratio", + "_decimal_to_Rational_prec", + "_literal_float", + "impl:7", + "igcd", + "impl:9", + "igcd_lehmer", + "ilcm", + "igcdex", + "mod_inverse", + "Number", + "Number.__new__", + "Number.__divmod__", + "Number._as_mpf_val", + "Number.floor", + "Number.ceiling", + "Number._eval_conjugate", + "Number._eval_order", + "Number._eval_subs", + "Number:13", + "Number.__eq__", + "Number.__ne__", + "Number.__lt__", + "Number.__le__", + "Number.__gt__", + "Number.__ge__", + "Number.as_coeff_mul", + "Number.as_coeff_add", + "Float", + "Float.__new__", + "Float._new", + "Float.__getnewargs__", + "Float.__getstate__", + "Float._hashable_content", + "Float.floor", + "Float.ceiling", + "Float.__nonzero__", + "Float:17", + "Float.__mod__", + "Float._eval_power", + "Float.__int__", + "Float:19", + "Float.__eq__", + "Float.__gt__", + "Float.__ge__", + "Float.__lt__", + "Float.__le__", + "Float.__hash__", + "Float.epsilon_eq", + "Float.__format__", + "impl:13", + "Rational", + "Rational.__new__", + "Rational.limit_denominator", + "Rational.__getnewargs__", + "Rational._hashable_content", + "Rational.__add__", + "Rational.__sub__", + "Rational.__rsub__", + "Rational.__mul__", + "Rational.__div__", + "Rational.__rdiv__", + "Rational.__mod__", + "Rational._eval_power", + "Rational.__int__", + "Rational.__eq__", + "Rational.__gt__", + "Rational.__ge__", + "Rational.__lt__", + "Rational.__le__", + "Rational.__hash__", + "Rational.factors", + 
"Rational.gcd", + "Rational.lcm", + "Rational._sage_", + "Rational.as_content_primitive", + "Rational.as_coeff_Add", + "impl:17", + "_intcache_printinfo", + "int_trace", + "Integer", + "Integer._mpmath_", + "Integer.__new__", + "Integer.__getnewargs__", + "Integer.__int__", + "Integer.__divmod__", + "Integer.__rdivmod__", + "Integer.__add__", + "Integer.__radd__", + "Integer.__sub__", + "Integer.__rsub__", + "Integer.__mul__", + "Integer.__rmul__", + "Integer.__eq__", + "Integer.__gt__", + "Integer.__lt__", + "Integer.__ge__", + "Integer.__le__", + "Integer.__index__", + "Integer._eval_power", + "Integer._eval_is_prime", + "Integer.__rfloordiv__", + "impl:23", + "AlgebraicNumber", + "AlgebraicNumber.__new__", + "AlgebraicNumber.as_poly", + "AlgebraicNumber.coeffs", + "AlgebraicNumber.to_algebraic_integer", + "AlgebraicNumber._eval_simplify", + "Zero", + "Zero._eval_power", + "Zero._eval_order", + "Zero.__nonzero__", + "Zero:17", + "Zero.as_coeff_Mul", + "One", + "One.factors", + "NegativeOne", + "NegativeOne._eval_power", + "Half", + "Infinity", + "Infinity.__new__", + "Infinity._latex", + "Infinity.__mul__", + "Infinity:17", + "Infinity.__div__", + "Infinity._eval_power", + "Infinity.__lt__", + "Infinity.__le__", + "Infinity.__gt__", + "Infinity.__ge__", + "Infinity.__mod__", + "impl:25", + "NegativeInfinity", + "NegativeInfinity.__new__", + "NegativeInfinity._latex", + "NegativeInfinity.__mul__", + "NegativeInfinity:15", + "NegativeInfinity.__div__", + "NegativeInfinity._eval_power", + "NegativeInfinity._as_mpf_val", + "NegativeInfinity._sage_", + "NegativeInfinity.__lt__", + "NegativeInfinity.__le__", + "NegativeInfinity.__gt__", + "NegativeInfinity.__ge__", + "NegativeInfinity.__mod__", + "NaN", + "NaN.__new__", + "NaN._latex", + "NaN.__div__", + "NaN.__eq__", + "NaN.__ne__", + "NaN._eval_Eq", + "NaN:33", + "impl:27", + "ComplexInfinity", + "ComplexInfinity.__new__", + "ComplexInfinity._latex", + "ComplexInfinity._eval_power", + "NumberSymbol", + "NumberSymbol.__eq__", + "NumberSymbol.__lt__", + "NumberSymbol.__le__", + "NumberSymbol.__gt__", + "NumberSymbol.__ge__", + "NumberSymbol.__int__", + "Exp1", + "Exp1._latex", + "Exp1.approximation_interval", + "Exp1._eval_rewrite_as_sin", + "Exp1._eval_rewrite_as_cos", + "Exp1._sage_", + "impl:31", + "Pi", + "Pi._latex", + "Pi.approximation_interval", + "Pi._sage_", + "impl:33", + "GoldenRatio", + "GoldenRatio._latex", + "GoldenRatio.__int__", + "GoldenRatio._as_mpf_val", + "GoldenRatio._eval_expand_func", + "GoldenRatio.approximation_interval", + "GoldenRatio._sage_", + "GoldenRatio:19", + "EulerGamma", + "EulerGamma._latex", + "EulerGamma.__int__", + "EulerGamma._as_mpf_val", + "EulerGamma.approximation_interval", + "Catalan", + "Catalan.__int__", + "Catalan._as_mpf_val", + "Catalan.approximation_interval", + "ImaginaryUnit", + "ImaginaryUnit._latex", + "ImaginaryUnit._eval_power", + "ImaginaryUnit._mpc_", + "impl:37", + "impl:47", + "sympify_complex", + "impl:48" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/.coveragerc b/.coveragerc\ndeleted file mode 100644\nindex cbc6c5c50..000000000\n--- a/.coveragerc\n+++ /dev/null\n@@ -1,18 +0,0 @@\n-[run]\n-include =\n- src/*\n- testing/*\n- */lib/python*/site-packages/_pytest/*\n- */lib/python*/site-packages/pytest.py\n- */pypy*/site-packages/_pytest/*\n- */pypy*/site-packages/pytest.py\n- *\\Lib\\site-packages\\_pytest\\*\n- *\\Lib\\site-packages\\pytest.py\n-parallel = 1\n-branch = 1\n-\n-[paths]\n-source = src/\n- */lib/python*/site-packages/\n- */pypy*/site-packages/\n- 
*\\Lib\\site-packages\\\ndiff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\nindex 757cb2797..08670d2b2 100644\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@ from _pytest.compat import dummy_context_manager\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n" + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\nindex 757cb2797..08670d2b2 100644\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@ from _pytest.compat import dummy_context_manager\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n" + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/core/numbers.py": [ + "imports", + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "imports", + "Float.__new__" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\nindex 757cb2797..08670d2b2 100644\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@ from _pytest.compat import dummy_context_manager\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \ndiff --git a/testing/logging/test_formatter.py b/testing/logging/test_formatter.py\nindex abceaf857..36a07754f 100644\n--- a/testing/logging/test_formatter.py\n+++ b/testing/logging/test_formatter.py\n@@ -6,7 +6,7 @@ from _pytest.logging import ColoredLevelFormatter\n \n \n def test_coloredlogformatter():\n- logfmt = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+ logfmt = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n \n record = logging.LogRecord(\n name=\"dummy\",\n@@ -27,10 +27,10 @@ def test_coloredlogformatter():\n formatter = ColoredLevelFormatter(tw, logfmt)\n output = formatter.format(record)\n assert output == (\n- \"dummypath 10 \\x1b[32mINFO \\x1b[0m Test Message\"\n+ \"dummy:dummypath 10 \\x1b[32mINFO \\x1b[0m Test Message\"\n )\n \n tw.hasmarkup = False\n formatter = ColoredLevelFormatter(tw, logfmt)\n output = formatter.format(record)\n- assert output == (\"dummypath 10 INFO Test Message\")\n+ assert output == (\"dummy:dummypath:10 INFO Test Message\")\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-16792", - "repo": "sympy/sympy", - "base_commit": "09786a173e7a0a488f46dd6000177c23e5d24eed", - "problem_statement": "autowrap with cython backend fails when array arguments do not appear in wrapped expr\nWhen using the cython backend for autowrap, it appears that the code is not correctly generated when the 
function in question has array arguments that do not appear in the final expression. A minimal counterexample is:\r\n\r\n```python\r\nfrom sympy.utilities.autowrap import autowrap\r\nfrom sympy import MatrixSymbol\r\nimport numpy as np\r\n\r\nx = MatrixSymbol('x', 2, 1)\r\nexpr = 1.0\r\nf = autowrap(expr, args=(x,), backend='cython')\r\n\r\nf(np.array([[1.0, 2.0]]))\r\n```\r\n\r\nThis should of course return `1.0` but instead fails with:\r\n```python\r\nTypeError: only size-1 arrays can be converted to Python scalars\r\n```\r\n\r\nA little inspection reveals that this is because the corresponding C function is generated with an incorrect signature:\r\n\r\n```C\r\ndouble autofunc(double x) {\r\n\r\n double autofunc_result;\r\n autofunc_result = 1.0;\r\n return autofunc_result;\r\n\r\n}\r\n```\r\n\r\n(`x` should be `double *`, not `double` in this case)\r\n\r\nI've found that this error won't occur so long as `expr` depends at least in part on each argument. For example this slight modification of the above counterexample works perfectly:\r\n\r\n```python\r\nfrom sympy.utilities.autowrap import autowrap\r\nfrom sympy import MatrixSymbol\r\nimport numpy as np\r\n\r\nx = MatrixSymbol('x', 2, 1)\r\n# now output depends on x\r\nexpr = x[0,0]\r\nf = autowrap(expr, args=(x,), backend='cython')\r\n\r\n# returns 1.0 as expected, without failure\r\nf(np.array([[1.0, 2.0]]))\r\n```\r\n\r\nThis may seem like a silly issue (\"why even have `x` as an argument if it doesn't appear in the expression you're trying to evaluate?\"). But of course in interfacing with external libraries (e.g. for numerical integration), one often needs functions to have a pre-defined signature regardless of whether a given argument contributes to the output.\r\n\r\nI think I've identified the problem in `codegen` and will suggest a PR shortly.\n", - "golden_patch": "diff --git a/sympy/utilities/codegen.py b/sympy/utilities/codegen.py\n--- a/sympy/utilities/codegen.py\n+++ b/sympy/utilities/codegen.py\n@@ -695,6 +695,11 @@ def routine(self, name, expr, argument_sequence=None, global_vars=None):\n arg_list = []\n \n # setup input argument list\n+\n+ # helper to get dimensions for data for array-like args\n+ def dimensions(s):\n+ return [(S.Zero, dim - 1) for dim in s.shape]\n+\n array_symbols = {}\n for array in expressions.atoms(Indexed) | local_expressions.atoms(Indexed):\n array_symbols[array.base.label] = array\n@@ -703,11 +708,8 @@ def routine(self, name, expr, argument_sequence=None, global_vars=None):\n \n for symbol in sorted(symbols, key=str):\n if symbol in array_symbols:\n- dims = []\n array = array_symbols[symbol]\n- for dim in array.shape:\n- dims.append((S.Zero, dim - 1))\n- metadata = {'dimensions': dims}\n+ metadata = {'dimensions': dimensions(array)}\n else:\n metadata = {}\n \n@@ -739,7 +741,11 @@ def routine(self, name, expr, argument_sequence=None, global_vars=None):\n try:\n new_args.append(name_arg_dict[symbol])\n except KeyError:\n- new_args.append(InputArgument(symbol))\n+ if isinstance(symbol, (IndexedBase, MatrixSymbol)):\n+ metadata = {'dimensions': dimensions(symbol)}\n+ else:\n+ metadata = {}\n+ new_args.append(InputArgument(symbol, **metadata))\n arg_list = new_args\n \n return Routine(name, arg_list, return_val, local_vars, global_vars)\n", - "expected_spans": { - "sympy/utilities/codegen.py": [ - "CodeGen.routine" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11422", - "repo": "django/django", - "base_commit": 
"df46b329e0900e9e4dc1d60816c1dce6dfc1094e", - "problem_statement": "Autoreloader with StatReloader doesn't track changes in manage.py.\nDescription\n\t \n\t\t(last modified by Mariusz Felisiak)\n\t \nThis is a bit convoluted, but here we go.\nEnvironment (OSX 10.11):\n$ python -V\nPython 3.6.2\n$ pip -V\npip 19.1.1\n$ pip install Django==2.2.1\nSteps to reproduce:\nRun a server python manage.py runserver\nEdit the manage.py file, e.g. add print(): \ndef main():\n\tprint('sth')\n\tos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ticket_30479.settings')\n\t...\nUnder 2.1.8 (and prior), this will trigger the auto-reloading mechanism. Under 2.2.1, it won't. As far as I can tell from the django.utils.autoreload log lines, it never sees the manage.py itself.\n", - "golden_patch": "diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py\n--- a/django/utils/autoreload.py\n+++ b/django/utils/autoreload.py\n@@ -114,7 +114,15 @@ def iter_modules_and_files(modules, extra_files):\n # During debugging (with PyDev) the 'typing.io' and 'typing.re' objects\n # are added to sys.modules, however they are types not modules and so\n # cause issues here.\n- if not isinstance(module, ModuleType) or getattr(module, '__spec__', None) is None:\n+ if not isinstance(module, ModuleType):\n+ continue\n+ if module.__name__ == '__main__':\n+ # __main__ (usually manage.py) doesn't always have a __spec__ set.\n+ # Handle this by falling back to using __file__, resolved below.\n+ # See https://docs.python.org/reference/import.html#main-spec\n+ sys_file_paths.append(module.__file__)\n+ continue\n+ if getattr(module, '__spec__', None) is None:\n continue\n spec = module.__spec__\n # Modules could be loaded from places without a concrete location. If\n", - "expected_spans": { - "django/utils/autoreload.py": [ - "iter_modules_and_files" - ] - }, - "resolved_by": [ - { - "name": "20240523_aider", - "patch": "diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py\nindex 7ca0d68fae..622333d627 100644\n--- a/django/utils/autoreload.py\n+++ b/django/utils/autoreload.py\n@@ -103,7 +103,9 @@ def iter_all_python_module_files():\n # aren't loaded on the fly.\n keys = sorted(sys.modules)\n modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes))\n- return iter_modules_and_files(modules, frozenset(_error_files))\n+ # Include manage.py explicitly\n+ manage_py_path = Path(sys.argv[0]).resolve()\n+ return iter_modules_and_files(modules, frozenset(_error_files).union({manage_py_path}))\n \n \n @functools.lru_cache(maxsize=1)\n" - } - ], - "alternative_spans": [ - { - "run_name": "20240523_aider", - "spans": { - "django/utils/autoreload.py": [ - "iter_all_python_module_files" + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" ] } - } - ] - }, - { - "instance_id": "pytest-dev__pytest-5413", - "repo": "pytest-dev/pytest", - "base_commit": "450d2646233c670654744d3d24330b69895bb9d2", - "problem_statement": "str() on the pytest.raises context variable doesn't behave same as normal exception catch\nPytest 4.6.2, macOS 10.14.5\r\n\r\n```Python\r\ntry:\r\n raise LookupError(\r\n f\"A\\n\"\r\n f\"B\\n\"\r\n f\"C\"\r\n )\r\nexcept LookupError as e:\r\n print(str(e))\r\n```\r\nprints\r\n\r\n> A\r\n> B\r\n> C\r\n\r\nBut\r\n\r\n```Python\r\nwith pytest.raises(LookupError) as e:\r\n raise LookupError(\r\n f\"A\\n\"\r\n f\"B\\n\"\r\n f\"C\"\r\n 
)\r\n\r\nprint(str(e))\r\n```\r\n\r\nprints\r\n\r\n> :3: LookupError: A\r\n\r\nIn order to get the full error message, one must do `str(e.value)`, which is documented, but this is a different interaction. Any chance the behavior could be changed to eliminate this gotcha?\r\n\r\n-----\r\n\r\nPip list gives\r\n\r\n```\r\nPackage Version Location\r\n------------------ -------- ------------------------------------------------------\r\napipkg 1.5\r\nasn1crypto 0.24.0\r\natomicwrites 1.3.0\r\nattrs 19.1.0\r\naws-xray-sdk 0.95\r\nboto 2.49.0\r\nboto3 1.9.51\r\nbotocore 1.12.144\r\ncertifi 2019.3.9\r\ncffi 1.12.3\r\nchardet 3.0.4\r\nClick 7.0\r\ncodacy-coverage 1.3.11\r\ncolorama 0.4.1\r\ncoverage 4.5.3\r\ncryptography 2.6.1\r\ndecorator 4.4.0\r\ndocker 3.7.2\r\ndocker-pycreds 0.4.0\r\ndocutils 0.14\r\necdsa 0.13.2\r\nexecnet 1.6.0\r\nfuture 0.17.1\r\nidna 2.8\r\nimportlib-metadata 0.17\r\nipaddress 1.0.22\r\nJinja2 2.10.1\r\njmespath 0.9.4\r\njsondiff 1.1.1\r\njsonpickle 1.1\r\njsonschema 2.6.0\r\nMarkupSafe 1.1.1\r\nmock 3.0.4\r\nmore-itertools 7.0.0\r\nmoto 1.3.7\r\nneobolt 1.7.10\r\nneotime 1.7.4\r\nnetworkx 2.1\r\nnumpy 1.15.0\r\npackaging 19.0\r\npandas 0.24.2\r\npip 19.1.1\r\npluggy 0.12.0\r\nprompt-toolkit 2.0.9\r\npy 1.8.0\r\npy2neo 4.2.0\r\npyaml 19.4.1\r\npycodestyle 2.5.0\r\npycparser 2.19\r\npycryptodome 3.8.1\r\nPygments 2.3.1\r\npyOpenSSL 19.0.0\r\npyparsing 2.4.0\r\npytest 4.6.2\r\npytest-cache 1.0\r\npytest-codestyle 1.4.0\r\npytest-cov 2.6.1\r\npytest-forked 1.0.2\r\npython-dateutil 2.7.3\r\npython-jose 2.0.2\r\npytz 2018.5\r\nPyYAML 5.1\r\nrequests 2.21.0\r\nrequests-mock 1.5.2\r\nresponses 0.10.6\r\ns3transfer 0.1.13\r\nsetuptools 41.0.1\r\nsix 1.11.0\r\nsqlite3worker 1.1.7\r\ntabulate 0.8.3\r\nurllib3 1.24.3\r\nwcwidth 0.1.7\r\nwebsocket-client 0.56.0\r\nWerkzeug 0.15.2\r\nwheel 0.33.1\r\nwrapt 1.11.1\r\nxlrd 1.1.0\r\nxmltodict 0.12.0\r\nzipp 0.5.1\r\n```\n", - "golden_patch": "diff --git a/src/_pytest/_code/code.py b/src/_pytest/_code/code.py\n--- a/src/_pytest/_code/code.py\n+++ b/src/_pytest/_code/code.py\n@@ -534,13 +534,6 @@ def getrepr(\n )\n return fmt.repr_excinfo(self)\n \n- def __str__(self):\n- if self._excinfo is None:\n- return repr(self)\n- entry = self.traceback[-1]\n- loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())\n- return str(loc)\n-\n def match(self, regexp):\n \"\"\"\n Check whether the regular expression 'regexp' is found in the string\n", - "expected_spans": { - "src/_pytest/_code/code.py": [ - "ExceptionInfo.__str__" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-16988", - "repo": "sympy/sympy", - "base_commit": "e727339af6dc22321b00f52d971cda39e4ce89fb", - "problem_statement": "Intersection should remove duplicates\n```python\r\n>>> Intersection({1},{1},{x})\r\nEmptySet()\r\n>>> Intersection({1},{x})\r\n{1}\r\n```\r\nThe answer should be `Piecewise(({1}, Eq(x, 1)), (S.EmptySet, True))` or remain unevaluated.\r\n\r\nThe routine should give the same answer if duplicates are present; my initial guess is that duplicates should just be removed at the outset of instantiation. 
Ordering them will produce canonical processing.\n", - "golden_patch": "diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py\n--- a/sympy/sets/sets.py\n+++ b/sympy/sets/sets.py\n@@ -1260,7 +1260,7 @@ def __new__(cls, *args, **kwargs):\n evaluate = kwargs.get('evaluate', global_evaluate[0])\n \n # flatten inputs to merge intersections and iterables\n- args = _sympify(args)\n+ args = list(ordered(set(_sympify(args))))\n \n # Reduce sets using known rules\n if evaluate:\n", - "expected_spans": { - "sympy/sets/sets.py": [ - "Intersection.__new__" - ] - }, - "resolved_by": [ - { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/reproduce_issue.py b/reproduce_issue.py\nnew file mode 100644\nindex 0000000000..cc9bc24f8f\n--- /dev/null\n+++ b/reproduce_issue.py\n@@ -0,0 +1,9 @@\n+from sympy import Intersection, Eq, S, Piecewise\n+\n+# Attempt to reproduce the issue\n+result1 = Intersection({1}, {1}, {S('x')})\n+result2 = Intersection({1}, {S('x')})\n+\n+print(\"Intersection({1},{1},{x}):\", result1)\n+print(\"Intersection({1},{x}):\", result2)\n+print(\"Expected for both:\", Piecewise(({1}, Eq(S('x'), 1)), (S.EmptySet, True)))\ndiff --git a/sympy/sets/sets.py b/sympy/sets/sets.py\nindex 03cf57a30b..fa865a6336 100644\n--- a/sympy/sets/sets.py\n+++ b/sympy/sets/sets.py\n@@ -1262,6 +1262,9 @@ def __new__(cls, *args, **kwargs):\n # flatten inputs to merge intersections and iterables\n args = _sympify(args)\n \n+ # Remove duplicates\n+ args = list(ordered(set(args)))\n+\n # Reduce sets using known rules\n if evaluate:\n args = list(cls._new_args_filter(args))\n" }, { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sympy/sets/sets.py\n+++ b/sympy/sets/sets.py\n@@ -1262,15 +1262,16 @@\n # flatten inputs to merge intersections and iterables\n args = _sympify(args)\n \n # Reduce sets using known rules\n if evaluate:\n args = list(cls._new_args_filter(args))\n+ args = list(ordered(set(args), Set._infimum_key))\n return simplify_intersection(args)\n \n- args = list(ordered(args, Set._infimum_key))\n+ args = list(ordered(set(args), Set._infimum_key))\n \n obj = Basic.__new__(cls, *args)\n obj._argset = frozenset(args)\n return obj\n \n @property\n" + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/core/numbers.py": [ + "mpf_norm", + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "mpf_norm", + "Float.__new__" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/.coveragerc b/.coveragerc\nnew file mode 100644\nindex 0000000000..bef85d5ee1\n--- /dev/null\n+++ b/.coveragerc\n@@ -0,0 +1,5 @@\n+[run]\n+omit = \n+\t# adde`d by auto-code-rover\n+\tsympy/sets/tests/test_sets.py\n+\ndiff --git a/sympy/sets/sets.py b/sympy/sets/sets.py\nindex 03cf57a30b..874300dabf 100644\n--- a/sympy/sets/sets.py\n+++ b/sympy/sets/sets.py\n@@ -1259,15 +1259,14 @@ def zero(self):\n def __new__(cls, *args, **kwargs):\n evaluate = kwargs.get('evaluate', global_evaluate[0])\n \n- # flatten inputs to merge intersections and iterables\n- args = _sympify(args)\n-\n+ # Remove duplicates and flatten inputs to merge intersections and iterables\n+ args = list(ordered(set(_sympify(args))))\n # Reduce sets using known rules\n if evaluate:\n args = list(cls._new_args_filter(args))\n return simplify_intersection(args)\n \n- args = list(ordered(args, Set._infimum_key))\n+ args = list(ordered(args))\n \n obj = Basic.__new__(cls, *args)\n obj._argset = frozenset(args)\n" - } - ], - "alternative_spans": [] - }, - { - 
"instance_id": "sympy__sympy-17022", - "repo": "sympy/sympy", - "base_commit": "f91de695585c1fbc7d4f49ee061f64fcb1c2c4d8", - "problem_statement": "Lambdify misinterprets some matrix expressions\nUsing lambdify on an expression containing an identity matrix gives us an unexpected result:\r\n\r\n```python\r\n>>> import numpy as np\r\n>>> n = symbols('n', integer=True)\r\n>>> A = MatrixSymbol(\"A\", n, n)\r\n>>> a = np.array([[1, 2], [3, 4]])\r\n>>> f = lambdify(A, A + Identity(n))\r\n>>> f(a)\r\narray([[1.+1.j, 2.+1.j],\r\n [3.+1.j, 4.+1.j]])\r\n```\r\n\r\nInstead, the output should be `array([[2, 2], [3, 5]])`, since we're adding an identity matrix to the array. Inspecting the globals and source code of `f` shows us why we get the result:\r\n\r\n```python\r\n>>> import inspect\r\n>>> print(inspect.getsource(f))\r\ndef _lambdifygenerated(A):\r\n return (I + A)\r\n>>> f.__globals__['I']\r\n1j\r\n```\r\n\r\nThe code printer prints `I`, which is currently being interpreted as a Python built-in complex number. The printer should support printing identity matrices, and signal an error for unsupported expressions that might be misinterpreted.\n", - "golden_patch": "diff --git a/sympy/printing/pycode.py b/sympy/printing/pycode.py\n--- a/sympy/printing/pycode.py\n+++ b/sympy/printing/pycode.py\n@@ -608,6 +608,13 @@ def _print_MatrixBase(self, expr):\n func = self._module_format('numpy.array')\n return \"%s(%s)\" % (func, self._print(expr.tolist()))\n \n+ def _print_Identity(self, expr):\n+ shape = expr.shape\n+ if all([dim.is_Integer for dim in shape]):\n+ return \"%s(%s)\" % (self._module_format('numpy.eye'), self._print(expr.shape[0]))\n+ else:\n+ raise NotImplementedError(\"Symbolic matrix dimensions are not yet supported for identity matrices\")\n+\n def _print_BlockMatrix(self, expr):\n return '{0}({1})'.format(self._module_format('numpy.block'),\n self._print(expr.args[0].tolist()))\n", - "expected_spans": { - "sympy/printing/pycode.py": [ - "NumPyPrinter._print_BlockMatrix" - ] - }, - "resolved_by": [ - { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sympy/printing/pycode.py\n+++ b/sympy/printing/pycode.py\n@@ -667,12 +667,18 @@\n self._print(expr.permutation.args[0]),\n )\n \n def _print_CodegenArrayElementwiseAdd(self, expr):\n return self._expand_fold_binary_op('numpy.add', expr.args)\n \n+ def _print_Identity(self, expr):\n+ shape = expr.shape\n+ if all(dim.is_Integer for dim in shape):\n+ return \"%s(%s)\" % (self._module_format('numpy.eye'), self._print(shape[0]))\n+ else:\n+ raise NotImplementedError(\"Symbolic matrix dimensions are not yet supported for identity matrices\")\n \n for k in NumPyPrinter._kf:\n setattr(NumPyPrinter, '_print_%s' % k, _print_known_func)\n \n for k in NumPyPrinter._kc:\n setattr(NumPyPrinter, '_print_%s' % k, _print_known_const)\n" - } - ], - "alternative_spans": [ - { - "run_name": "20240509_amazon-q-developer-agent-20240430-dev", - "spans": { - "sympy/printing/pycode.py": [ - "NumPyPrinter._print_CodegenArrayElementwiseAdd", - "impl:35" + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" ] } - } - ] - }, - { - "instance_id": "scikit-learn__scikit-learn-14087", - "repo": "scikit-learn/scikit-learn", - "base_commit": "a5743ed36fbd3fbc8e351bdab16561fbfca7dfa1", - "problem_statement": "IndexError thrown with LogisticRegressionCV and refit=False\n#### Description\r\nThe following error is 
thrown when trying to estimate a regularization parameter via cross-validation, *without* refitting.\r\n\r\n#### Steps/Code to Reproduce\r\n```python\r\nimport sys\r\nimport sklearn\r\nfrom sklearn.linear_model import LogisticRegressionCV\r\nimport numpy as np\r\n\r\nnp.random.seed(29)\r\nX = np.random.normal(size=(1000, 3))\r\nbeta = np.random.normal(size=3)\r\nintercept = np.random.normal(size=None)\r\ny = np.sign(intercept + X @ beta)\r\n\r\nLogisticRegressionCV(\r\ncv=5,\r\nsolver='saga', # same error with 'liblinear'\r\ntol=1e-2,\r\nrefit=False).fit(X, y)\r\n```\r\n\r\n\r\n#### Expected Results\r\nNo error is thrown. \r\n\r\n#### Actual Results\r\n```\r\n---------------------------------------------------------------------------\r\nIndexError Traceback (most recent call last)\r\n in \r\n----> 1 LogisticRegressionCV(refit=False).fit(X, y)\r\n\r\n~/.pyenv/versions/3.6.7/envs/jupyter/lib/python3.6/site-packages/sklearn/linear_model/logistic.py in fit(self, X, y, sample_weight)\r\n 2192 else:\r\n 2193 w = np.mean([coefs_paths[:, i, best_indices[i], :]\r\n-> 2194 for i in range(len(folds))], axis=0)\r\n 2195 \r\n 2196 best_indices_C = best_indices % len(self.Cs_)\r\n\r\n~/.pyenv/versions/3.6.7/envs/jupyter/lib/python3.6/site-packages/sklearn/linear_model/logistic.py in (.0)\r\n 2192 else:\r\n 2193 w = np.mean([coefs_paths[:, i, best_indices[i], :]\r\n-> 2194 for i in range(len(folds))], axis=0)\r\n 2195 \r\n 2196 best_indices_C = best_indices % len(self.Cs_)\r\n\r\nIndexError: too many indices for array\r\n```\r\n\r\n#### Versions\r\n```\r\nSystem:\r\n python: 3.6.7 (default, May 13 2019, 16:14:45) [GCC 4.2.1 Compatible Apple LLVM 10.0.1 (clang-1001.0.46.4)]\r\nexecutable: /Users/tsweetser/.pyenv/versions/3.6.7/envs/jupyter/bin/python\r\n machine: Darwin-18.6.0-x86_64-i386-64bit\r\n\r\nBLAS:\r\n macros: NO_ATLAS_INFO=3, HAVE_CBLAS=None\r\n lib_dirs: \r\ncblas_libs: cblas\r\n\r\nPython deps:\r\n pip: 19.1.1\r\nsetuptools: 39.0.1\r\n sklearn: 0.21.2\r\n numpy: 1.15.1\r\n scipy: 1.1.0\r\n Cython: 0.29.6\r\n pandas: 0.24.2\r\n```\n", - "golden_patch": "diff --git a/sklearn/linear_model/logistic.py b/sklearn/linear_model/logistic.py\n--- a/sklearn/linear_model/logistic.py\n+++ b/sklearn/linear_model/logistic.py\n@@ -2170,7 +2170,7 @@ def fit(self, X, y, sample_weight=None):\n # Take the best scores across every fold and the average of\n # all coefficients corresponding to the best scores.\n best_indices = np.argmax(scores, axis=1)\n- if self.multi_class == 'ovr':\n+ if multi_class == 'ovr':\n w = np.mean([coefs_paths[i, best_indices[i], :]\n for i in range(len(folds))], axis=0)\n else:\n@@ -2180,8 +2180,11 @@ def fit(self, X, y, sample_weight=None):\n best_indices_C = best_indices % len(self.Cs_)\n self.C_.append(np.mean(self.Cs_[best_indices_C]))\n \n- best_indices_l1 = best_indices // len(self.Cs_)\n- self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))\n+ if self.penalty == 'elasticnet':\n+ best_indices_l1 = best_indices // len(self.Cs_)\n+ self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))\n+ else:\n+ self.l1_ratio_.append(None)\n \n if multi_class == 'multinomial':\n self.C_ = np.tile(self.C_, n_classes)\n", - "expected_spans": { - "sklearn/linear_model/logistic.py": [ - "LogisticRegressionCV.fit" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-14092", - "repo": "scikit-learn/scikit-learn", - "base_commit": "df7dd8391148a873d157328a4f0328528a0c4ed9", - "problem_statement": "NCA fails in GridSearch due to too 
strict parameter checks\nNCA checks its parameters to have a specific type, which can easily fail in a GridSearch due to how param grid is made.\r\n\r\nHere is an example:\r\n```python\r\nimport numpy as np\r\n\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.neighbors import NeighborhoodComponentsAnalysis\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nX = np.random.random_sample((100, 10))\r\ny = np.random.randint(2, size=100)\r\n\r\nnca = NeighborhoodComponentsAnalysis()\r\nknn = KNeighborsClassifier()\r\n\r\npipe = Pipeline([('nca', nca),\r\n ('knn', knn)])\r\n \r\nparams = {'nca__tol': [0.1, 0.5, 1],\r\n 'nca__n_components': np.arange(1, 10)}\r\n \r\ngs = GridSearchCV(estimator=pipe, param_grid=params, error_score='raise')\r\ngs.fit(X,y)\r\n```\r\n\r\nThe issue is that for `tol`: 1 is not a float, and for `n_components`: np.int64 is not int\r\n\r\nBefore proposing a fix for this specific situation, I'd like to have your general opinion about parameter checking. \r\nI like this idea of common parameter checking tool introduced with the NCA PR. What do you think about extending it across the code-base (or at least for new or recent estimators) ?\r\n\r\nCurrently parameter checking is not always done or often partially done, and is quite redundant. For instance, here is the input validation of lda:\r\n```python\r\ndef _check_params(self):\r\n \"\"\"Check model parameters.\"\"\"\r\n if self.n_components <= 0:\r\n raise ValueError(\"Invalid 'n_components' parameter: %r\"\r\n % self.n_components)\r\n\r\n if self.total_samples <= 0:\r\n raise ValueError(\"Invalid 'total_samples' parameter: %r\"\r\n % self.total_samples)\r\n\r\n if self.learning_offset < 0:\r\n raise ValueError(\"Invalid 'learning_offset' parameter: %r\"\r\n % self.learning_offset)\r\n\r\n if self.learning_method not in (\"batch\", \"online\"):\r\n raise ValueError(\"Invalid 'learning_method' parameter: %r\"\r\n % self.learning_method)\r\n```\r\nmost params aren't checked and for those who are there's a lot of duplicated code.\r\n\r\nA propose to be upgrade the new tool to be able to check open/closed intervals (currently only closed) and list membership.\r\n\r\nThe api would be something like that:\r\n```\r\ncheck_param(param, name, valid_options)\r\n```\r\nwhere valid_options would be a dict of `type: constraint`. e.g for the `beta_loss` param of `NMF`, it can be either a float or a string in a list, which would give\r\n```\r\nvalid_options = {numbers.Real: None, # None for no constraint\r\n str: ['frobenius', 'kullback-leibler', 'itakura-saito']}\r\n```\r\nSometimes a parameter can only be positive or within a given interval, e.g. `l1_ratio` of `LogisticRegression` must be between 0 and 1, which would give\r\n```\r\nvalid_options = {numbers.Real: Interval(0, 1, closed='both')}\r\n```\r\npositivity of e.g. 
`max_iter` would be `numbers.Integral: Interval(left=1)`.\n", - "golden_patch": "diff --git a/sklearn/neighbors/nca.py b/sklearn/neighbors/nca.py\n--- a/sklearn/neighbors/nca.py\n+++ b/sklearn/neighbors/nca.py\n@@ -13,6 +13,7 @@\n import numpy as np\n import sys\n import time\n+import numbers\n from scipy.optimize import minimize\n from ..utils.extmath import softmax\n from ..metrics import pairwise_distances\n@@ -299,7 +300,8 @@ def _validate_params(self, X, y):\n \n # Check the preferred dimensionality of the projected space\n if self.n_components is not None:\n- check_scalar(self.n_components, 'n_components', int, 1)\n+ check_scalar(\n+ self.n_components, 'n_components', numbers.Integral, 1)\n \n if self.n_components > X.shape[1]:\n raise ValueError('The preferred dimensionality of the '\n@@ -318,9 +320,9 @@ def _validate_params(self, X, y):\n .format(X.shape[1],\n self.components_.shape[1]))\n \n- check_scalar(self.max_iter, 'max_iter', int, 1)\n- check_scalar(self.tol, 'tol', float, 0.)\n- check_scalar(self.verbose, 'verbose', int, 0)\n+ check_scalar(self.max_iter, 'max_iter', numbers.Integral, 1)\n+ check_scalar(self.tol, 'tol', numbers.Real, 0.)\n+ check_scalar(self.verbose, 'verbose', numbers.Integral, 0)\n \n if self.callback is not None:\n if not callable(self.callback):\n", - "expected_spans": { - "sklearn/neighbors/nca.py": [ - "imports", - "NeighborhoodComponentsAnalysis._validate_params" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "pytest-dev__pytest-5495", - "repo": "pytest-dev/pytest", - "base_commit": "1aefb24b37c30fba8fd79a744829ca16e252f340", - "problem_statement": "Confusing assertion rewriting message with byte strings\nThe comparison with assertion rewriting for byte strings is confusing: \r\n```\r\n def test_b():\r\n> assert b\"\" == b\"42\"\r\nE AssertionError: assert b'' == b'42'\r\nE Right contains more items, first extra item: 52\r\nE Full diff:\r\nE - b''\r\nE + b'42'\r\nE ? ++\r\n```\r\n\r\n52 is the ASCII ordinal of \"4\" here.\r\n\r\nIt became clear to me when using another example:\r\n\r\n```\r\n def test_b():\r\n> assert b\"\" == b\"1\"\r\nE AssertionError: assert b'' == b'1'\r\nE Right contains more items, first extra item: 49\r\nE Full diff:\r\nE - b''\r\nE + b'1'\r\nE ? 
+\r\n```\r\n\r\nNot sure what should/could be done here.\n", - "golden_patch": "diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py\n--- a/src/_pytest/assertion/util.py\n+++ b/src/_pytest/assertion/util.py\n@@ -254,17 +254,38 @@ def _compare_eq_iterable(left, right, verbose=0):\n \n \n def _compare_eq_sequence(left, right, verbose=0):\n+ comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)\n explanation = []\n len_left = len(left)\n len_right = len(right)\n for i in range(min(len_left, len_right)):\n if left[i] != right[i]:\n+ if comparing_bytes:\n+ # when comparing bytes, we want to see their ascii representation\n+ # instead of their numeric values (#5260)\n+ # using a slice gives us the ascii representation:\n+ # >>> s = b'foo'\n+ # >>> s[0]\n+ # 102\n+ # >>> s[0:1]\n+ # b'f'\n+ left_value = left[i : i + 1]\n+ right_value = right[i : i + 1]\n+ else:\n+ left_value = left[i]\n+ right_value = right[i]\n+\n explanation += [\n- \"At index {} diff: {!r} != {!r}\".format(i, left[i], right[i])\n+ \"At index {} diff: {!r} != {!r}\".format(i, left_value, right_value)\n ]\n break\n- len_diff = len_left - len_right\n \n+ if comparing_bytes:\n+ # when comparing bytes, it doesn't help to show the \"sides contain one or more items\"\n+ # longer explanation, so skip it\n+ return explanation\n+\n+ len_diff = len_left - len_right\n if len_diff:\n if len_diff > 0:\n dir_with_more = \"Left\"\n", - "expected_spans": { - "src/_pytest/assertion/util.py": [ - "_compare_eq_sequence" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-17139", - "repo": "sympy/sympy", - "base_commit": "1d3327b8e90a186df6972991963a5ae87053259d", - "problem_statement": "simplify(cos(x)**I): Invalid comparison of complex I (fu.py)\n```\r\n>>> from sympy import *\r\n>>> x = Symbol('x')\r\n>>> print(simplify(cos(x)**I))\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/home/e/se/sympy/simplify/simplify.py\", line 587, in simplify\r\n expr = trigsimp(expr, deep=True)\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 508, in trigsimp\r\n return trigsimpfunc(expr)\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 501, in \r\n 'matching': (lambda x: futrig(x)),\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 1101, in futrig\r\n e = bottom_up(e, lambda x: _futrig(x, **kwargs))\r\n File \"/home/e/se/sympy/simplify/simplify.py\", line 1081, in bottom_up\r\n rv = F(rv)\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 1101, in \r\n e = bottom_up(e, lambda x: _futrig(x, **kwargs))\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 1169, in _futrig\r\n e = greedy(tree, objective=Lops)(e)\r\n File \"/home/e/se/sympy/strategies/core.py\", line 115, in minrule\r\n return min([rule(expr) for rule in rules], key=objective)\r\n File \"/home/e/se/sympy/strategies/core.py\", line 115, in \r\n return min([rule(expr) for rule in rules], key=objective)\r\n File \"/home/e/se/sympy/strategies/core.py\", line 44, in chain_rl\r\n expr = rule(expr)\r\n File \"/home/e/se/sympy/simplify/fu.py\", line 566, in TR6\r\n return _TR56(rv, cos, sin, lambda x: 1 - x, max=max, pow=pow)\r\n File \"/home/e/se/sympy/simplify/fu.py\", line 524, in _TR56\r\n return bottom_up(rv, _f)\r\n File \"/home/e/se/sympy/simplify/simplify.py\", line 1081, in bottom_up\r\n rv = F(rv)\r\n File \"/home/e/se/sympy/simplify/fu.py\", line 504, in _f\r\n if (rv.exp < 0) == True:\r\n File \"/home/e/se/sympy/core/expr.py\", line 406, in __lt__\r\n 
raise TypeError(\"Invalid comparison of complex %s\" % me)\r\nTypeError: Invalid comparison of complex I\r\n```\n", - "golden_patch": "diff --git a/sympy/simplify/fu.py b/sympy/simplify/fu.py\n--- a/sympy/simplify/fu.py\n+++ b/sympy/simplify/fu.py\n@@ -500,6 +500,8 @@ def _f(rv):\n # change is not going to allow a simplification as far as I can tell.\n if not (rv.is_Pow and rv.base.func == f):\n return rv\n+ if not rv.exp.is_real:\n+ return rv\n \n if (rv.exp < 0) == True:\n return rv\n", - "expected_spans": { - "sympy/simplify/fu.py": [] - }, - "resolved_by": [ + }, { - "name": "20240530_autocoderover-v20240408", - "patch": "diff --git a/sympy/simplify/fu.py b/sympy/simplify/fu.py\nindex ad1130ef96..471fea6b48 100644\n--- a/sympy/simplify/fu.py\n+++ b/sympy/simplify/fu.py\n@@ -501,6 +501,9 @@ def _f(rv):\n if not (rv.is_Pow and rv.base.func == f):\n return rv\n \n+ # Check if the exponent is real before comparing\n+ if isinstance(rv.exp, Expr) and not rv.exp.is_real:\n+ return rv\n if (rv.exp < 0) == True:\n return rv\n if (rv.exp > max) == True:\n" + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sympy/simplify/fu.py b/sympy/simplify/fu.py\nindex ad1130ef96..d1ac8b312d 100644\n--- a/sympy/simplify/fu.py\n+++ b/sympy/simplify/fu.py\n@@ -493,14 +493,15 @@ def _TR56(rv, f, g, h, max, pow):\n (1 - cos(x)**2)**4\n \"\"\"\n \n- def _f(rv):\n- # I'm not sure if this transformation should target all even powers\n- # or only those expressible as powers of 2. Also, should it only\n- # make the changes in powers that appear in sums -- making an isolated\n- # change is not going to allow a simplification as far as I can tell.\n+ def _f(rv, f, g, h, max, pow):\n+ from sympy.core.numbers import ImaginaryUnit\n if not (rv.is_Pow and rv.base.func == f):\n return rv\n \n+ # Check if the exponent is a complex number (specifically ImaginaryUnit)\n+ if isinstance(rv.exp, ImaginaryUnit) or (rv.exp.is_complex and rv.exp.as_real_imag()[1] != 0):\n+ return rv\n+\n if (rv.exp < 0) == True:\n return rv\n if (rv.exp > max) == True:\n@@ -521,7 +522,7 @@ def _f(rv):\n e = rv.exp//2\n return h(g(rv.base.args[0])**2)**e\n \n- return bottom_up(rv, _f)\n+ return bottom_up(rv, lambda x: _f(x, f, g, h, max, pow))\n \n \n def TR5(rv, max=4, pow=False):\n" + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + }, + "alternative_spans": { + "sympy/core/numbers.py": [ + "Float.__new__" + ] + } } ], "alternative_spans": [] }, { - "instance_id": "django__django-11564", - "repo": "django/django", - "base_commit": "580e644f24f1c5ae5b94784fb73a9953a178fd26", - "problem_statement": "Add support for SCRIPT_NAME in STATIC_URL and MEDIA_URL\nDescription\n\t \n\t\t(last modified by Rostyslav Bryzgunov)\n\t \nBy default, {% static '...' %} tag just appends STATIC_URL in the path. 
When running on sub-path, using SCRIPT_NAME WSGI param, it results in incorrect static URL - it doesn't prepend SCRIPT_NAME prefix.\nThis problem can be solved with prepending SCRIPT_NAME to STATIC_URL in settings.py but that doesn't work when SCRIPT_NAME is a dynamic value.\nThis can be easily added into default Django static tag and django.contrib.staticfiles tag as following:\ndef render(self, context):\n\turl = self.url(context)\n\t# Updating url here with request.META['SCRIPT_NAME'] \n\tif self.varname is None:\n\t\treturn url\n\tcontext[self.varname] = url\n\t\treturn ''\nOn more research I found that FileSystemStorage and StaticFilesStorage ignores SCRIPT_NAME as well. \nWe might have to do a lot of changes but I think it's worth the efforts.\n", - "golden_patch": "diff --git a/django/conf/__init__.py b/django/conf/__init__.py\n--- a/django/conf/__init__.py\n+++ b/django/conf/__init__.py\n@@ -15,7 +15,8 @@\n \n import django\n from django.conf import global_settings\n-from django.core.exceptions import ImproperlyConfigured\n+from django.core.exceptions import ImproperlyConfigured, ValidationError\n+from django.core.validators import URLValidator\n from django.utils.deprecation import RemovedInDjango40Warning\n from django.utils.functional import LazyObject, empty\n \n@@ -109,6 +110,26 @@ def configure(self, default_settings=global_settings, **options):\n setattr(holder, name, value)\n self._wrapped = holder\n \n+ @staticmethod\n+ def _add_script_prefix(value):\n+ \"\"\"\n+ Add SCRIPT_NAME prefix to relative paths.\n+\n+ Useful when the app is being served at a subpath and manually prefixing\n+ subpath to STATIC_URL and MEDIA_URL in settings is inconvenient.\n+ \"\"\"\n+ # Don't apply prefix to valid URLs.\n+ try:\n+ URLValidator()(value)\n+ return value\n+ except (ValidationError, AttributeError):\n+ pass\n+ # Don't apply prefix to absolute paths.\n+ if value.startswith('/'):\n+ return value\n+ from django.urls import get_script_prefix\n+ return '%s%s' % (get_script_prefix(), value)\n+\n @property\n def configured(self):\n \"\"\"Return True if the settings have already been configured.\"\"\"\n@@ -128,6 +149,14 @@ def PASSWORD_RESET_TIMEOUT_DAYS(self):\n )\n return self.__getattr__('PASSWORD_RESET_TIMEOUT_DAYS')\n \n+ @property\n+ def STATIC_URL(self):\n+ return self._add_script_prefix(self.__getattr__('STATIC_URL'))\n+\n+ @property\n+ def MEDIA_URL(self):\n+ return self._add_script_prefix(self.__getattr__('MEDIA_URL'))\n+\n \n class Settings:\n def __init__(self, settings_module):\n", + "instance_id": "sympy__sympy-13480", + "repo": "sympy/sympy", + "base_commit": "f57fe3f4b3f2cab225749e1b3b38ae1bf80b62f0", + "problem_statement": ".subs on coth(log(tan(x))) errors for certain integral values\n >>> from sympy import *\r\n >>> x = Symbol('x')\r\n >>> e = coth(log(tan(x)))\r\n >>> print(e.subs(x, 2))\r\n ...\r\n File \"C:\\Users\\E\\Desktop\\sympy-master\\sympy\\functions\\elementary\\hyperbolic.py\", line 590, in eval\r\n if cotm is S.ComplexInfinity:\r\n NameError: name 'cotm' is not defined\r\n\r\nFails for 2, 3, 5, 6, 8, 9, 11, 12, 13, 15, 18, ... 
etc.\n", + "golden_patch": "diff --git a/sympy/functions/elementary/hyperbolic.py b/sympy/functions/elementary/hyperbolic.py\n--- a/sympy/functions/elementary/hyperbolic.py\n+++ b/sympy/functions/elementary/hyperbolic.py\n@@ -587,7 +587,7 @@ def eval(cls, arg):\n x, m = _peeloff_ipi(arg)\n if m:\n cothm = coth(m)\n- if cotm is S.ComplexInfinity:\n+ if cothm is S.ComplexInfinity:\n return coth(x)\n else: # cothm == 0\n return tanh(x)\n", + "test_patch": "diff --git a/sympy/functions/elementary/tests/test_hyperbolic.py b/sympy/functions/elementary/tests/test_hyperbolic.py\n--- a/sympy/functions/elementary/tests/test_hyperbolic.py\n+++ b/sympy/functions/elementary/tests/test_hyperbolic.py\n@@ -272,6 +272,8 @@ def test_coth():\n \n assert coth(k*pi*I) == -cot(k*pi)*I\n \n+ assert coth(log(tan(2))) == coth(log(-tan(2)))\n+ assert coth(1 + I*pi/2) == tanh(1)\n \n def test_coth_series():\n x = Symbol('x')\n", + "fail_to_pass": "[\"test_coth\"]", + "pass_to_pass": "[\"test_sinh\", \"test_sinh_series\", \"test_cosh\", \"test_cosh_series\", \"test_tanh\", \"test_tanh_series\", \"test_coth_series\", \"test_csch\", \"test_csch_series\", \"test_sech\", \"test_sech_series\", \"test_asinh\", \"test_asinh_rewrite\", \"test_asinh_series\", \"test_acosh\", \"test_acosh_rewrite\", \"test_acosh_series\", \"test_asech\", \"test_asech_series\", \"test_asech_rewrite\", \"test_acsch\", \"test_acsch_infinities\", \"test_acsch_rewrite\", \"test_atanh\", \"test_atanh_rewrite\", \"test_atanh_series\", \"test_acoth\", \"test_acoth_rewrite\", \"test_acoth_series\", \"test_inverses\", \"test_leading_term\", \"test_complex\", \"test_complex_2899\", \"test_simplifications\", \"test_issue_4136\", \"test_sinh_rewrite\", \"test_cosh_rewrite\", \"test_tanh_rewrite\", \"test_coth_rewrite\", \"test_csch_rewrite\", \"test_sech_rewrite\", \"test_derivs\", \"test_sinh_expansion\"]", "expected_spans": { - "django/conf/__init__.py": [ - "imports", - "LazySettings.configured" + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" ] }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11583", - "repo": "django/django", - "base_commit": "60dc957a825232fdda9138e2f8878b2ca407a7c9", - "problem_statement": "Auto-reloading with StatReloader very intermittently throws \"ValueError: embedded null byte\".\nDescription\n\t\nRaising this mainly so that it's tracked, as I have no idea how to reproduce it, nor why it's happening. 
It ultimately looks like a problem with Pathlib, which wasn't used prior to 2.2.\nStacktrace:\nTraceback (most recent call last):\n File \"manage.py\" ...\n\texecute_from_command_line(sys.argv)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/__init__.py\", line 381, in execute_from_command_line\n\tutility.execute()\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/__init__.py\", line 375, in execute\n\tself.fetch_command(subcommand).run_from_argv(self.argv)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/base.py\", line 323, in run_from_argv\n\tself.execute(*args, **cmd_options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/commands/runserver.py\", line 60, in execute\n\tsuper().execute(*args, **options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/base.py\", line 364, in execute\n\toutput = self.handle(*args, **options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/commands/runserver.py\", line 95, in handle\n\tself.run(**options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/commands/runserver.py\", line 102, in run\n\tautoreload.run_with_reloader(self.inner_run, **options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 577, in run_with_reloader\n\tstart_django(reloader, main_func, *args, **kwargs)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 562, in start_django\n\treloader.run(django_main_thread)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 280, in run\n\tself.run_loop()\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 286, in run_loop\n\tnext(ticker)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 326, in tick\n\tfor filepath, mtime in self.snapshot_files():\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 342, in snapshot_files\n\tfor file in self.watched_files():\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 241, in watched_files\n\tyield from iter_all_python_module_files()\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 103, in iter_all_python_module_files\n\treturn iter_modules_and_files(modules, frozenset(_error_files))\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 132, in iter_modules_and_files\n\tresults.add(path.resolve().absolute())\n File \"/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/pathlib.py\", line 1120, in resolve\n\ts = self._flavour.resolve(self, strict=strict)\n File \"/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/pathlib.py\", line 346, in resolve\n\treturn _resolve(base, str(path)) or sep\n File \"/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/pathlib.py\", line 330, in _resolve\n\ttarget = accessor.readlink(newpath)\n File \"/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/pathlib.py\", line 441, in readlink\n\treturn os.readlink(path)\nValueError: embedded null byte\nI did print(path) before os.readlink(path) in pathlib and ended up 
with:\n/Users/kez\n/Users/kez/.pyenv\n/Users/kez/.pyenv/versions\n/Users/kez/.pyenv/versions/3.6.2\n/Users/kez/.pyenv/versions/3.6.2/lib\n/Users/kez/.pyenv/versions/3.6.2/lib/python3.6\n/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/asyncio\n/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/asyncio/selector_events.py\n/Users\nIt always seems to be /Users which is last\nIt may have already printed /Users as part of another .resolve() multiple times (that is, the order is not deterministic, and it may have traversed beyond /Users successfully many times during startup.\nI don't know where to begin looking for the rogue null byte, nor why it only exists sometimes.\nBest guess I have is that there's a mountpoint in /Users to a samba share which may not have been connected to yet? I dunno.\nI have no idea if it's fixable without removing the use of pathlib (which tbh I think should happen anyway, because it's slow) and reverting to using os.path.join and friends. \nI have no idea if it's fixed in a later Python version, but with no easy way to reproduce ... dunno how I'd check.\nI have no idea if it's something specific to my system (pyenv, OSX 10.11, etc)\n", - "golden_patch": "diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py\n--- a/django/utils/autoreload.py\n+++ b/django/utils/autoreload.py\n@@ -143,6 +143,10 @@ def iter_modules_and_files(modules, extra_files):\n # The module could have been removed, don't fail loudly if this\n # is the case.\n continue\n+ except ValueError as e:\n+ # Network filesystems may return null bytes in file paths.\n+ logger.debug('\"%s\" raised when resolving path: \"%s\"' % (str(e), path))\n+ continue\n results.add(resolved_path)\n return frozenset(results)\n \n", - "expected_spans": { - "django/utils/autoreload.py": [ - "iter_modules_and_files" + "test_file_spans": { + "sympy/functions/elementary/tests/test_hyperbolic.py": [ + "test_coth" ] }, "resolved_by": [ - { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py\nindex 25c3b44250..e0adb01e79 100644\n--- a/django/utils/autoreload.py\n+++ b/django/utils/autoreload.py\n@@ -139,9 +139,10 @@ def iter_modules_and_files(modules, extra_files):\n path = Path(filename)\n try:\n resolved_path = path.resolve(strict=True).absolute()\n- except FileNotFoundError:\n- # The module could have been removed, don't fail loudly if this\n- # is the case.\n+ except (FileNotFoundError, ValueError) as e:\n+ # The module could have been removed or the path could be invalid,\n+ # don't fail loudly if this is the case.\n+ logger.debug('Error resolving path %s: %s', path, e)\n continue\n results.add(resolved_path)\n return frozenset(results)\n" - }, { "name": "20240523_aider", - "patch": "diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py\nindex 25c3b44250..665284df59 100644\n--- a/django/utils/autoreload.py\n+++ b/django/utils/autoreload.py\n@@ -105,6 +105,15 @@ def iter_all_python_module_files():\n return iter_modules_and_files(modules, frozenset(_error_files))\n \n \n+@functools.lru_cache(maxsize=1)\n+def contains_null_byte(path):\n+ \"\"\"Check if the path contains a null byte.\"\"\"\n+ return '\\x00' in str(path)\n+\n+def contains_null_byte(path):\n+ \"\"\"Check if the path contains a null byte.\"\"\"\n+ return '\\x00' in str(path)\n+\n @functools.lru_cache(maxsize=1)\n def iter_modules_and_files(modules, extra_files):\n \"\"\"Iterate through all modules needed to be watched.\"\"\"\n@@ -137,13 +146,16 @@ def 
iter_modules_and_files(modules, extra_files):\n if not filename:\n continue\n path = Path(filename)\n- try:\n- resolved_path = path.resolve(strict=True).absolute()\n- except FileNotFoundError:\n- # The module could have been removed, don't fail loudly if this\n- # is the case.\n- continue\n- results.add(resolved_path)\n+ if contains_null_byte(path):\n+ logger.warning('Skipping path with embedded null byte: %s', path)\n+ else:\n+ try:\n+ resolved_path = path.resolve(strict=True).absolute()\n+ except FileNotFoundError:\n+ # The module could have been removed, don't fail loudly if this\n+ # is the case.\n+ continue\n+ results.add(resolved_path)\n return frozenset(results)\n \n \n" + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/django/utils/autoreload.py\n+++ b/django/utils/autoreload.py\n@@ -143,6 +143,9 @@\n # The module could have been removed, don't fail loudly if this\n # is the case.\n continue\n+ except ValueError:\n+ logger.warning(f'Failed to resolve path {path} due to an embedded null byte.')\n+ continue\n results.add(resolved_path)\n return frozenset(results)\n \n" + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/utils/autoreload.py b/django/utils/autoreload.py\nindex 25c3b44250..336fdefad6 100644\n--- a/django/utils/autoreload.py\n+++ b/django/utils/autoreload.py\n@@ -139,9 +139,8 @@ def iter_modules_and_files(modules, extra_files):\n path = Path(filename)\n try:\n resolved_path = path.resolve(strict=True).absolute()\n- except FileNotFoundError:\n- # The module could have been removed, don't fail loudly if this\n- # is the case.\n+ except (FileNotFoundError, ValueError):\n+ # The module could have been removed or the path is invalid (e.g., contains a null byte), don't fail loudly if this is the case.\n continue\n results.add(resolved_path)\n return frozenset(results)\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11620", - "repo": "django/django", - "base_commit": "514efa3129792ec2abb2444f3e7aeb3f21a38386", - "problem_statement": "When DEBUG is True, raising Http404 in a path converter's to_python method does not result in a technical response\nDescription\n\t\nThis is the response I get (plain text): \nA server error occurred. 
Please contact the administrator.\nI understand a ValueError should be raised which tells the URL resolver \"this path does not match, try next one\" but Http404 is what came to my mind intuitively and the error message was not very helpful.\nOne could also make a point that raising a Http404 should be valid way to tell the resolver \"this is indeed the right path but the current parameter value does not match anything so stop what you are doing and let the handler return the 404 page (including a helpful error message when DEBUG is True instead of the default 'Django tried these URL patterns')\".\nThis would prove useful for example to implement a path converter that uses get_object_or_404.\n", - "golden_patch": "diff --git a/django/views/debug.py b/django/views/debug.py\n--- a/django/views/debug.py\n+++ b/django/views/debug.py\n@@ -5,10 +5,10 @@\n from pathlib import Path\n \n from django.conf import settings\n-from django.http import HttpResponse, HttpResponseNotFound\n+from django.http import Http404, HttpResponse, HttpResponseNotFound\n from django.template import Context, Engine, TemplateDoesNotExist\n from django.template.defaultfilters import pprint\n-from django.urls import Resolver404, resolve\n+from django.urls import resolve\n from django.utils import timezone\n from django.utils.datastructures import MultiValueDict\n from django.utils.encoding import force_str\n@@ -483,7 +483,7 @@ def technical_404_response(request, exception):\n caller = ''\n try:\n resolver_match = resolve(request.path)\n- except Resolver404:\n+ except Http404:\n pass\n else:\n obj = resolver_match.func\n", - "expected_spans": { - "django/views/debug.py": [ - "imports", - "technical_404_response" - ] - }, - "resolved_by": [ - { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/urls/resolvers.py b/django/urls/resolvers.py\nindex 2154a46320..180a9697d4 100644\n--- a/django/urls/resolvers.py\n+++ b/django/urls/resolvers.py\n@@ -27,6 +27,7 @@ from .converters import get_converter\n from .exceptions import NoReverseMatch, Resolver404\n from .utils import get_callable\n \n+from django.http import Http404\n \n class ResolverMatch:\n def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None, route=None):\n@@ -258,7 +259,8 @@ class RoutePattern(CheckURLMixin):\n converter = self.converters[key]\n try:\n kwargs[key] = converter.to_python(value)\n- except ValueError:\n+ except (ValueError, Http404):\n+ return None\n return None\n return path[match.end():], (), kwargs\n return None\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "pytest-dev__pytest-5692", - "repo": "pytest-dev/pytest", - "base_commit": "29e336bd9bf87eaef8e2683196ee1975f1ad4088", - "problem_statement": "Hostname and timestamp properties in generated JUnit XML reports\nPytest enables generating JUnit XML reports of the tests.\r\n\r\nHowever, there are some properties missing, specifically `hostname` and `timestamp` from the `testsuite` XML element. 
Is there an option to include them?\r\n\r\nExample of a pytest XML report:\r\n```xml\r\n\r\n\r\n\t\r\n\t\r\n\r\n```\r\n\r\nExample of a junit XML report:\r\n```xml\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n\r\n```\n", - "golden_patch": "diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py\n--- a/src/_pytest/junitxml.py\n+++ b/src/_pytest/junitxml.py\n@@ -10,9 +10,11 @@\n \"\"\"\n import functools\n import os\n+import platform\n import re\n import sys\n import time\n+from datetime import datetime\n \n import py\n \n@@ -666,6 +668,8 @@ def pytest_sessionfinish(self):\n skipped=self.stats[\"skipped\"],\n tests=numtests,\n time=\"%.3f\" % suite_time_delta,\n+ timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(),\n+ hostname=platform.node(),\n )\n logfile.write(Junit.testsuites([suite_node]).unicode(indent=0))\n logfile.close()\n", - "expected_spans": { - "src/_pytest/junitxml.py": [ - "imports", - "LogXML.pytest_sessionfinish" - ] - }, - "resolved_by": [ + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py\nindex 15c630b1d..80421edca 100644\n--- a/src/_pytest/junitxml.py\n+++ b/src/_pytest/junitxml.py\n@@ -657,6 +657,11 @@ class LogXML:\n )\n logfile.write('')\n \n+ import socket\n+ import datetime\n+ hostname = socket.gethostname()\n+ timestamp = datetime.datetime.now().isoformat()\n+\n suite_node = Junit.testsuite(\n self._get_global_properties_node(),\n [x.to_xml() for x in self.node_reporters_ordered],\n@@ -666,6 +671,8 @@ class LogXML:\n skipped=self.stats[\"skipped\"],\n tests=numtests,\n time=\"%.3f\" % suite_time_delta,\n+ hostname=hostname,\n+ timestamp=timestamp,\n )\n logfile.write(Junit.testsuites([suite_node]).unicode(indent=0))\n logfile.close()\n" + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py\nindex 15c630b1d..66472acc2 100644\n--- a/src/_pytest/junitxml.py\n+++ b/src/_pytest/junitxml.py\n@@ -9,6 +9,8 @@ Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/\n src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd\n \"\"\"\n import functools\n+import socket\n+from datetime import datetime\n import os\n import re\n import sys\n@@ -477,6 +479,8 @@ class LogXML:\n logfile = os.path.expanduser(os.path.expandvars(logfile))\n self.logfile = os.path.normpath(os.path.abspath(logfile))\n self.prefix = prefix\n+ self.hostname = socket.gethostname()\n+ self.timestamp = datetime.now().isoformat()\n self.suite_name = suite_name\n self.logging = logging\n self.log_passing_tests = log_passing_tests\n@@ -666,6 +670,8 @@ class LogXML:\n skipped=self.stats[\"skipped\"],\n tests=numtests,\n time=\"%.3f\" % suite_time_delta,\n+ timestamp=self.timestamp,\n+ hostname=self.hostname,\n )\n logfile.write(Junit.testsuites([suite_node]).unicode(indent=0))\n logfile.close()\n" + "name": "20240828_autose_mixed", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + 
"coth.eval" + ] + } }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/src/_pytest/junitxml.py\n+++ b/src/_pytest/junitxml.py\n@@ -24,6 +24,8 @@\n class Junit(py.xml.Namespace):\n pass\n \n+import datetime\n+import socket\n \n # We need to get the subset of the invalid unicode ranges according to\n # XML 1.0 which are valid in this python build. Hence we calculate\n@@ -644,6 +646,9 @@\n dirname = os.path.dirname(os.path.abspath(self.logfile))\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n+ hostname = socket.gethostname()\n+ timestamp = datetime.datetime.now().isoformat()\n+\n logfile = open(self.logfile, \"w\", encoding=\"utf-8\")\n suite_stop_time = time.time()\n suite_time_delta = suite_stop_time - self.suite_start_time\n@@ -666,6 +671,8 @@\n skipped=self.stats[\"skipped\"],\n tests=numtests,\n time=\"%.3f\" % suite_time_delta,\n+ timestamp=timestamp, # Add this line\n+ hostname=hostname, # And this line\n )\n logfile.write(Junit.testsuites([suite_node]).unicode(indent=0))\n logfile.close()\n" + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py\nindex 15c630b1d..1571ef1c6 100644\n--- a/src/_pytest/junitxml.py\n+++ b/src/_pytest/junitxml.py\n@@ -13,6 +13,8 @@ import os\n import re\n import sys\n import time\n+import socket\n+from datetime import datetime\n \n import py\n \n@@ -666,6 +668,8 @@ class LogXML:\n skipped=self.stats[\"skipped\"],\n tests=numtests,\n time=\"%.3f\" % suite_time_delta,\n+ hostname=socket.gethostname(),\n+ timestamp=datetime.now().isoformat(),\n )\n logfile.write(Junit.testsuites([suite_node]).unicode(indent=0))\n logfile.close()\n" + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py\nindex 15c630b1d..90af9a107 100644\n--- a/src/_pytest/junitxml.py\n+++ b/src/_pytest/junitxml.py\n@@ -19,7 +19,8 @@ import py\n import pytest\n from _pytest import nodes\n from _pytest.config import filename_arg\n-\n+import socket\n+from datetime import datetime\n \n class Junit(py.xml.Namespace):\n pass\n@@ -656,7 +657,6 @@ class LogXML:\n - self.cnt_double_fail_tests\n )\n logfile.write('')\n-\n suite_node = Junit.testsuite(\n self._get_global_properties_node(),\n [x.to_xml() for x in self.node_reporters_ordered],\n@@ -666,6 +666,8 @@ class LogXML:\n skipped=self.stats[\"skipped\"],\n tests=numtests,\n time=\"%.3f\" % suite_time_delta,\n+ timestamp=datetime.now().isoformat(),\n+ hostname=socket.gethostname(),\n )\n logfile.write(Junit.testsuites([suite_node]).unicode(indent=0))\n logfile.close()\n" + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py\nindex 15c630b1d..80421edca 100644\n--- a/src/_pytest/junitxml.py\n+++ b/src/_pytest/junitxml.py\n@@ -657,6 +657,11 @@ class LogXML:\n )\n logfile.write('')\n \n+ import socket\n+ import datetime\n+ hostname = socket.gethostname()\n+ timestamp = 
datetime.datetime.now().isoformat()\n+\n suite_node = Junit.testsuite(\n self._get_global_properties_node(),\n [x.to_xml() for x in self.node_reporters_ordered],\n@@ -666,6 +671,8 @@ class LogXML:\n skipped=self.stats[\"skipped\"],\n tests=numtests,\n time=\"%.3f\" % suite_time_delta,\n+ hostname=hostname,\n+ timestamp=timestamp,\n )\n logfile.write(Junit.testsuites([suite_node]).unicode(indent=0))\n logfile.close()\n" - } - ], - "alternative_spans": [ + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, { - "run_name": "20240402_sweagent_gpt4", - "spans": { - "src/_pytest/junitxml.py": [ - "LogXML.pytest_sessionfinish" + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" ] } }, { - "run_name": "20240524_opencsg_starship_gpt4", - "spans": { - "src/_pytest/junitxml.py": [ - "LogXML.pytest_sessionfinish" + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" ] } }, { - "run_name": "20240527_marscode-agent-dev", - "spans": { - "src/_pytest/junitxml.py": [ - "LogXML.pytest_sessionfinish" + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ], + "sympy/functions/elementary/tests/test_hyperbolic.py": [ + "test_coth_series" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" ] } }, { - "run_name": "20240604_CodeR", - "spans": { - "src/_pytest/junitxml.py": [ - "LogXML.pytest_sessionfinish" + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" ] } }, { - "run_name": "20240612_IBM_Research_Agent101", - "spans": { - "src/_pytest/junitxml.py": [ - "LogXML.pytest_sessionfinish" + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" ] } - } - ] - }, - { - "instance_id": "django__django-11630", - "repo": "django/django", - "base_commit": "65e86948b80262574058a94ccaae3a9b59c3faea", - "problem_statement": "Django throws error when different apps with different models have the same name table name.\nDescription\n\t\nError message:\ntable_name: (models.E028) db_table 'table_name' is used by multiple models: base.ModelName, app2.ModelName.\nWe have a Base app that points to a central database and that has its own tables. We then have multiple Apps that talk to their own databases. Some share the same table names.\nWe have used this setup for a while, but after upgrading to Django 2.2 we're getting an error saying we're not allowed 2 apps, with 2 different models to have the same table names. \nIs this correct behavior? 
We've had to roll back to Django 2.0 for now.\n", - "golden_patch": "diff --git a/django/core/checks/model_checks.py b/django/core/checks/model_checks.py\n--- a/django/core/checks/model_checks.py\n+++ b/django/core/checks/model_checks.py\n@@ -4,7 +4,8 @@\n from itertools import chain\n \n from django.apps import apps\n-from django.core.checks import Error, Tags, register\n+from django.conf import settings\n+from django.core.checks import Error, Tags, Warning, register\n \n \n @register(Tags.models)\n@@ -35,14 +36,25 @@ def check_all_models(app_configs=None, **kwargs):\n indexes[model_index.name].append(model._meta.label)\n for model_constraint in model._meta.constraints:\n constraints[model_constraint.name].append(model._meta.label)\n+ if settings.DATABASE_ROUTERS:\n+ error_class, error_id = Warning, 'models.W035'\n+ error_hint = (\n+ 'You have configured settings.DATABASE_ROUTERS. Verify that %s '\n+ 'are correctly routed to separate databases.'\n+ )\n+ else:\n+ error_class, error_id = Error, 'models.E028'\n+ error_hint = None\n for db_table, model_labels in db_table_models.items():\n if len(model_labels) != 1:\n+ model_labels_str = ', '.join(model_labels)\n errors.append(\n- Error(\n+ error_class(\n \"db_table '%s' is used by multiple models: %s.\"\n- % (db_table, ', '.join(db_table_models[db_table])),\n+ % (db_table, model_labels_str),\n obj=db_table,\n- id='models.E028',\n+ hint=(error_hint % model_labels_str) if error_hint else None,\n+ id=error_id,\n )\n )\n for index_name, model_labels in indexes.items():\n", - "expected_spans": { - "django/core/checks/model_checks.py": [ - "check_all_models" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11742", - "repo": "django/django", - "base_commit": "fee75d2aed4e58ada6567c464cfd22e89dc65f4a", - "problem_statement": "Add check to ensure max_length fits longest choice.\nDescription\n\t\nThere is currently no check to ensure that Field.max_length is large enough to fit the longest value in Field.choices.\nThis would be very helpful as often this mistake is not noticed until an attempt is made to save a record with those values that are too long.\n", - "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -257,6 +257,7 @@ def is_value(value, accept_promise=True):\n )\n ]\n \n+ choice_max_length = 0\n # Expect [group_name, [value, display]]\n for choices_group in self.choices:\n try:\n@@ -270,16 +271,32 @@ def is_value(value, accept_promise=True):\n for value, human_name in group_choices\n ):\n break\n+ if self.max_length is not None and group_choices:\n+ choice_max_length = max(\n+ choice_max_length,\n+ *(len(value) for value, _ in group_choices if isinstance(value, str)),\n+ )\n except (TypeError, ValueError):\n # No groups, choices in the form [value, display]\n value, human_name = group_name, group_choices\n if not is_value(value) or not is_value(human_name):\n break\n+ if self.max_length is not None and isinstance(value, str):\n+ choice_max_length = max(choice_max_length, len(value))\n \n # Special case: choices=['ab']\n if isinstance(choices_group, str):\n break\n else:\n+ if self.max_length is not None and choice_max_length > self.max_length:\n+ return [\n+ checks.Error(\n+ \"'max_length' is too small to fit the longest value \"\n+ \"in 'choices' (%d characters).\" % choice_max_length,\n+ obj=self,\n+ id='fields.E009',\n+ ),\n+ ]\n return []\n \n return 
[\n", - "expected_spans": { - "django/db/models/fields/__init__.py": [ - "Field._check_choices" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-14894", - "repo": "scikit-learn/scikit-learn", - "base_commit": "fdbaa58acbead5a254f2e6d597dc1ab3b947f4c6", - "problem_statement": "ZeroDivisionError in _sparse_fit for SVM with empty support_vectors_\n#### Description\r\nWhen using sparse data, in the case where the support_vectors_ attribute is be empty, _fit_sparse gives a ZeroDivisionError\r\n\r\n#### Steps/Code to Reproduce\r\n```\r\nimport numpy as np\r\nimport scipy\r\nimport sklearn\r\nfrom sklearn.svm import SVR\r\nx_train = np.array([[0, 1, 0, 0],\r\n[0, 0, 0, 1],\r\n[0, 0, 1, 0],\r\n[0, 0, 0, 1]])\r\ny_train = np.array([0.04, 0.04, 0.10, 0.16])\r\nmodel = SVR(C=316.227766017, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,\r\n \t gamma=1.0, kernel='linear', max_iter=15000,\r\n \t shrinking=True, tol=0.001, verbose=False)\r\n# dense x_train has no error\r\nmodel.fit(x_train, y_train)\r\n\r\n# convert to sparse\r\nxtrain= scipy.sparse.csr_matrix(x_train)\r\nmodel.fit(xtrain, y_train)\r\n\r\n```\r\n#### Expected Results\r\nNo error is thrown and `self.dual_coef_ = sp.csr_matrix([])`\r\n\r\n#### Actual Results\r\n```\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/usr/local/lib/python3.5/dist-packages/sklearn/svm/base.py\", line 209, in fit\r\n fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)\r\n File \"/usr/local/lib/python3.5/dist-packages/sklearn/svm/base.py\", line 302, in _sparse_fit\r\n dual_coef_indices.size / n_class)\r\nZeroDivisionError: float division by zero\r\n```\r\n\r\n#### Versions\r\n```\r\n>>> sklearn.show_versions() \r\n\r\nSystem:\r\nexecutable: /usr/bin/python3\r\n python: 3.5.2 (default, Nov 12 2018, 13:43:14) [GCC 5.4.0 20160609]\r\n machine: Linux-4.15.0-58-generic-x86_64-with-Ubuntu-16.04-xenial\r\n\r\nPython deps:\r\n numpy: 1.17.0\r\n Cython: None\r\n pip: 19.2.1\r\n pandas: 0.22.0\r\n sklearn: 0.21.3\r\n scipy: 1.3.0\r\nsetuptools: 40.4.3\r\n```\n", - "golden_patch": "diff --git a/sklearn/svm/base.py b/sklearn/svm/base.py\n--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -287,11 +287,14 @@ def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,\n n_SV = self.support_vectors_.shape[0]\n \n dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n- dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n- dual_coef_indices.size / n_class)\n- self.dual_coef_ = sp.csr_matrix(\n- (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n- (n_class, n_SV))\n+ if not n_SV:\n+ self.dual_coef_ = sp.csr_matrix([])\n+ else:\n+ dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n+ dual_coef_indices.size / n_class)\n+ self.dual_coef_ = sp.csr_matrix(\n+ (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n+ (n_class, n_SV))\n \n def predict(self, X):\n \"\"\"Perform regression on samples in X.\n", - "expected_spans": { - "sklearn/svm/base.py": [ - "BaseLibSVM._sparse_fit" - ] - }, - "resolved_by": [ + }, { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/sklearn/svm/base.py b/sklearn/svm/base.py\nindex 64cebe837..77502952e 100644\n--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -287,11 +287,15 @@ class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):\n n_SV = self.support_vectors_.shape[0]\n \n dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n- dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n- 
dual_coef_indices.size / n_class)\n- self.dual_coef_ = sp.csr_matrix(\n- (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n- (n_class, n_SV))\n+ if n_SV == 0:\n+ self.dual_coef_ = sp.csr_matrix([])\n+ else:\n+ dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n+ dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n+ dual_coef_indices.size / n_class)\n+ self.dual_coef_ = sp.csr_matrix(\n+ (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n+ (n_class, n_SV))\n \n def predict(self, X):\n \"\"\"Perform regression on samples in X.\n" + "name": "20240829_Isoform", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/sklearn/svm/base.py b/sklearn/svm/base.py\nindex 64cebe837..347c460a9 100644\n--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -287,11 +287,14 @@ class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):\n n_SV = self.support_vectors_.shape[0]\n \n dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n- dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n- dual_coef_indices.size / n_class)\n- self.dual_coef_ = sp.csr_matrix(\n- (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n- (n_class, n_SV))\n+ if dual_coef_indices.size == 0:\n+ # Create an empty sparse matrix with 0 non-zero elements\n+ self.dual_coef_ = sp.csr_matrix((0, n_SV), dtype=np.float64)\n+ else:\n+ dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1, dual_coef_indices.size // n_class, dtype=np.int32)\n+ self.dual_coef_ = sp.csr_matrix(\n+ (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n+ (n_class, n_SV))\n \n def predict(self, X):\n \"\"\"Perform regression on samples in X.\n" + "name": "20240604_CodeR", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -283,18 +283,21 @@\n if hasattr(self, \"classes_\"):\n n_class = len(self.classes_) - 1\n else: # regression\n n_class = 1\n n_SV = self.support_vectors_.shape[0]\n \n- dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n- dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n- dual_coef_indices.size / n_class)\n- self.dual_coef_ = sp.csr_matrix(\n- (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n- (n_class, n_SV))\n+ if n_SV == 0:\n+ self.dual_coef_ = sp.csr_matrix([])\n+ else:\n+ dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n+ dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n+ dual_coef_indices.size // n_class)\n+ self.dual_coef_ = sp.csr_matrix(\n+ (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n+ (n_class, n_SV))\n \n def predict(self, X):\n \"\"\"Perform regression on samples in X.\n \n For an one-class model, +1 (inlier) or -1 (outlier) is returned.\n \n" + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/sklearn/svm/base.py b/sklearn/svm/base.py\nindex 64cebe837..7eda7360e 100644\n--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -286,12 +286,15 @@ class 
BaseLibSVM(BaseEstimator, metaclass=ABCMeta):\n n_class = 1\n n_SV = self.support_vectors_.shape[0]\n \n- dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n- dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n- dual_coef_indices.size / n_class)\n- self.dual_coef_ = sp.csr_matrix(\n- (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n- (n_class, n_SV))\n+ if n_SV == 0:\n+ self.dual_coef_ = sp.csr_matrix((0, 0))\n+ else:\n+ dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n+ dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n+ dual_coef_indices.size / n_class)\n+ self.dual_coef_ = sp.csr_matrix(\n+ (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n+ (n_class, n_SV))\n \n def predict(self, X):\n \"\"\"Perform regression on samples in X.\n" + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sklearn/svm/base.py b/sklearn/svm/base.py\nindex 64cebe837..74a48ba43 100644\n--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -286,12 +286,15 @@ class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):\n n_class = 1\n n_SV = self.support_vectors_.shape[0]\n \n- dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n- dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n- dual_coef_indices.size / n_class)\n- self.dual_coef_ = sp.csr_matrix(\n- (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n- (n_class, n_SV))\n+ if n_class > 0 and n_SV > 0:\n+ dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n+ dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n+ int(dual_coef_indices.size / n_class))\n+ self.dual_coef_ = sp.csr_matrix(\n+ (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n+ (n_class, n_SV))\n+ else:\n+ self.dual_coef_ = sp.csr_matrix((0, n_SV))\n \n def predict(self, X):\n \"\"\"Perform regression on samples in X.\n" + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sklearn/svm/base.py b/sklearn/svm/base.py\nindex 64cebe837..42b492953 100644\n--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -280,12 +280,18 @@ class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):\n \n self._warn_from_fit_status()\n \n- if hasattr(self, \"classes_\"):\n- n_class = len(self.classes_) - 1\n- else: # regression\n+ # Check if it's a regression task or if there's only one class\n+ if not hasattr(self, \"classes_\") or len(self.classes_) <= 1:\n n_class = 1\n+ else:\n+ n_class = len(self.classes_) - 1\n+ # Ensure n_class is never zero to avoid ZeroDivisionError\n+ n_class = max(n_class, 1)\n+ # If n_class is 1, we do not need to calculate dual_coef_indptr\n+ if n_class == 1:\n+ self.dual_coef_ = sp.csr_matrix(dual_coef_data)\n+ return\n n_SV = self.support_vectors_.shape[0]\n-\n dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n dual_coef_indices.size / n_class)\n" + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, + { + "name": 
"20240402_sweagent_claude3opus", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sklearn/svm/base.py b/sklearn/svm/base.py\nindex 64cebe837..e59affefb 100644\n--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -286,12 +286,15 @@ class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):\n n_class = 1\n n_SV = self.support_vectors_.shape[0]\n \n- dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n- dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n- dual_coef_indices.size / n_class)\n- self.dual_coef_ = sp.csr_matrix(\n- (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n- (n_class, n_SV))\n+ if n_SV == 0:\n+ self.dual_coef_ = sp.csr_matrix((n_class, 0))\n+ else:\n+ dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n+ dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n+ dual_coef_indices.size / n_class)\n+ self.dual_coef_ = sp.csr_matrix(\n+ (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n+ (n_class, n_SV))\n \n def predict(self, X):\n \"\"\"Perform regression on samples in X.\n" + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sklearn/svm/base.py b/sklearn/svm/base.py\nindex 64cebe837..2067ebd28 100644\n--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -286,12 +286,16 @@ class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):\n n_class = 1\n n_SV = self.support_vectors_.shape[0]\n \n- dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n- dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n- dual_coef_indices.size / n_class)\n- self.dual_coef_ = sp.csr_matrix(\n- (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n- (n_class, n_SV))\n+ if n_SV == 0:\n+ # If there are no support vectors, create an empty csr_matrix for dual_coef_\n+ self.dual_coef_ = sp.csr_matrix((n_class, 0))\n+ else:\n+ dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n+ dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n+ dual_coef_indices.size / n_class)\n+ self.dual_coef_ = sp.csr_matrix(\n+ (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n+ (n_class, n_SV))\n \n def predict(self, X):\n \"\"\"Perform regression on samples in X.\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-14983", - "repo": "scikit-learn/scikit-learn", - "base_commit": "06632c0d185128a53c57ccc73b25b6408e90bb89", - "problem_statement": "RepeatedKFold and RepeatedStratifiedKFold do not show correct __repr__ string\n#### Description\r\n\r\n`RepeatedKFold` and `RepeatedStratifiedKFold` do not show correct \\_\\_repr\\_\\_ string.\r\n\r\n#### Steps/Code to Reproduce\r\n\r\n```python\r\n>>> from sklearn.model_selection import RepeatedKFold, RepeatedStratifiedKFold\r\n>>> repr(RepeatedKFold())\r\n>>> repr(RepeatedStratifiedKFold())\r\n```\r\n\r\n#### Expected Results\r\n\r\n```python\r\n>>> repr(RepeatedKFold())\r\nRepeatedKFold(n_splits=5, n_repeats=10, random_state=None)\r\n>>> repr(RepeatedStratifiedKFold())\r\nRepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=None)\r\n```\r\n\r\n#### Actual Results\r\n\r\n```python\r\n>>> 
repr(RepeatedKFold())\r\n''\r\n>>> repr(RepeatedStratifiedKFold())\r\n''\r\n```\r\n\r\n#### Versions\r\n```\r\nSystem:\r\n python: 3.7.4 (default, Aug 9 2019, 18:34:13) [MSC v.1915 64 bit (AMD64)]\r\nexecutable: D:\\anaconda3\\envs\\xyz\\python.exe\r\n machine: Windows-10-10.0.16299-SP0\r\n\r\nBLAS:\r\n macros:\r\n lib_dirs:\r\ncblas_libs: cblas\r\n\r\nPython deps:\r\n pip: 19.2.2\r\nsetuptools: 41.0.1\r\n sklearn: 0.21.2\r\n numpy: 1.16.4\r\n scipy: 1.3.1\r\n Cython: None\r\n pandas: 0.24.2\r\n```\n", - "golden_patch": "diff --git a/sklearn/model_selection/_split.py b/sklearn/model_selection/_split.py\n--- a/sklearn/model_selection/_split.py\n+++ b/sklearn/model_selection/_split.py\n@@ -1163,6 +1163,9 @@ def get_n_splits(self, X=None, y=None, groups=None):\n **self.cvargs)\n return cv.get_n_splits(X, y, groups) * self.n_repeats\n \n+ def __repr__(self):\n+ return _build_repr(self)\n+\n \n class RepeatedKFold(_RepeatedSplits):\n \"\"\"Repeated K-Fold cross validator.\n@@ -2158,6 +2161,8 @@ def _build_repr(self):\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n+ if value is None and hasattr(self, 'cvargs'):\n+ value = self.cvargs.get(key, None)\n if len(w) and w[0].category == DeprecationWarning:\n # if the parameter is deprecated, don't show it\n continue\n", - "expected_spans": { - "sklearn/model_selection/_split.py": [ - "_build_repr" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-17630", - "repo": "sympy/sympy", - "base_commit": "58e78209c8577b9890e957b624466e5beed7eb08", - "problem_statement": "Exception when multiplying BlockMatrix containing ZeroMatrix blocks\nWhen a block matrix with zero blocks is defined\r\n\r\n```\r\n>>> from sympy import *\r\n>>> a = MatrixSymbol(\"a\", 2, 2)\r\n>>> z = ZeroMatrix(2, 2)\r\n>>> b = BlockMatrix([[a, z], [z, z]])\r\n```\r\n\r\nthen block-multiplying it once seems to work fine:\r\n\r\n```\r\n>>> block_collapse(b * b)\r\nMatrix([\r\n[a**2, 0],\r\n[0, 0]])\r\n>>> b._blockmul(b)\r\nMatrix([\r\n[a**2, 0],\r\n[0, 0]])\r\n```\r\n\r\nbut block-multiplying twice throws an exception:\r\n\r\n```\r\n>>> block_collapse(b * b * b)\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 297, in block_collapse\r\n result = rule(expr)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 11, in exhaustive_rl\r\n new, old = rule(expr), expr\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 44, in chain_rl\r\n expr = rule(expr)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 11, in exhaustive_rl\r\n new, old = rule(expr), expr\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 33, in conditioned_rl\r\n return rule(expr)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 95, in switch_rl\r\n return rl(expr)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 361, in bc_matmul\r\n matrices[i] = A._blockmul(B)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 91, in _blockmul\r\n self.colblocksizes == other.rowblocksizes):\r\n File 
\"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 80, in colblocksizes\r\n return [self.blocks[0, i].cols for i in range(self.blockshape[1])]\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 80, in \r\n return [self.blocks[0, i].cols for i in range(self.blockshape[1])]\r\nAttributeError: 'Zero' object has no attribute 'cols'\r\n>>> b._blockmul(b)._blockmul(b)\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 91, in _blockmul\r\n self.colblocksizes == other.rowblocksizes):\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 80, in colblocksizes\r\n return [self.blocks[0, i].cols for i in range(self.blockshape[1])]\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 80, in \r\n return [self.blocks[0, i].cols for i in range(self.blockshape[1])]\r\nAttributeError: 'Zero' object has no attribute 'cols'\r\n```\r\n\r\nThis seems to be caused by the fact that the zeros in `b._blockmul(b)` are not `ZeroMatrix` but `Zero`:\r\n\r\n```\r\n>>> type(b._blockmul(b).blocks[0, 1])\r\n\r\n```\r\n\r\nHowever, I don't understand SymPy internals well enough to find out why this happens. I use Python 3.7.4 and sympy 1.4 (installed with pip).\n", - "golden_patch": "diff --git a/sympy/matrices/expressions/matexpr.py b/sympy/matrices/expressions/matexpr.py\n--- a/sympy/matrices/expressions/matexpr.py\n+++ b/sympy/matrices/expressions/matexpr.py\n@@ -627,6 +627,8 @@ def _postprocessor(expr):\n # manipulate them like non-commutative scalars.\n return cls._from_args(nonmatrices + [mat_class(*matrices).doit(deep=False)])\n \n+ if mat_class == MatAdd:\n+ return mat_class(*matrices).doit(deep=False)\n return mat_class(cls._from_args(nonmatrices), *matrices).doit(deep=False)\n return _postprocessor\n \n", - "expected_spans": { - "sympy/matrices/expressions/matexpr.py": [ - "get_postprocessor" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11797", - "repo": "django/django", - "base_commit": "3346b78a8a872286a245d1e77ef4718fc5e6be1a", - "problem_statement": "Filtering on query result overrides GROUP BY of internal query\nDescription\n\t\nfrom django.contrib.auth import models\na = models.User.objects.filter(email__isnull=True).values('email').annotate(m=Max('id')).values('m')\nprint(a.query) # good\n# SELECT MAX(\"auth_user\".\"id\") AS \"m\" FROM \"auth_user\" WHERE \"auth_user\".\"email\" IS NULL GROUP BY \"auth_user\".\"email\"\nprint(a[:1].query) # good\n# SELECT MAX(\"auth_user\".\"id\") AS \"m\" FROM \"auth_user\" WHERE \"auth_user\".\"email\" IS NULL GROUP BY \"auth_user\".\"email\" LIMIT 1\nb = models.User.objects.filter(id=a[:1])\nprint(b.query) # GROUP BY U0.\"id\" should be GROUP BY U0.\"email\"\n# SELECT ... 
FROM \"auth_user\" WHERE \"auth_user\".\"id\" = (SELECT U0.\"id\" FROM \"auth_user\" U0 WHERE U0.\"email\" IS NULL GROUP BY U0.\"id\" LIMIT 1)\n", - "golden_patch": "diff --git a/django/db/models/lookups.py b/django/db/models/lookups.py\n--- a/django/db/models/lookups.py\n+++ b/django/db/models/lookups.py\n@@ -262,9 +262,9 @@ def process_rhs(self, compiler, connection):\n from django.db.models.sql.query import Query\n if isinstance(self.rhs, Query):\n if self.rhs.has_limit_one():\n- # The subquery must select only the pk.\n- self.rhs.clear_select_clause()\n- self.rhs.add_fields(['pk'])\n+ if not self.rhs.has_select_fields:\n+ self.rhs.clear_select_clause()\n+ self.rhs.add_fields(['pk'])\n else:\n raise ValueError(\n 'The QuerySet value for an exact lookup must be limited to '\n", - "expected_spans": { - "django/db/models/lookups.py": [ - "Exact.process_rhs" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11815", - "repo": "django/django", - "base_commit": "e02f67ef2d03d48128e7a118bf75f0418e24e8ac", - "problem_statement": "Migrations uses value of enum object instead of its name.\nDescription\n\t \n\t\t(last modified by oasl)\n\t \nWhen using Enum object as a default value for a CharField, the generated migration file uses the value of the Enum object instead of the its name. This causes a problem when using Django translation on the value of the Enum object. \nThe problem is that, when the Enum object value get translated to the users language, the old migration files raise an error stating that the Enum does not have the corresponding value. (because the Enum value is translated to another language)\nExample:\nLet say we have this code in models.py:\nfrom enum import Enum\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db import models\nclass Status(Enum):\n\tGOOD = _('Good') # 'Good' will be translated\n\tBAD = _('Bad') # 'Bad' will be translated\n\tdef __str__(self):\n\t\treturn self.name\nclass Item(models.Model):\n\tstatus = models.CharField(default=Status.GOOD, max_length=128)\nIn the generated migration file, the code will be:\n...\n('status', models.CharField(default=Status('Good'), max_length=128))\n...\nAfter the translation, 'Good' will be translated to another word and it will not be part of the Status Enum class any more, so the migration file will raise the error on the previous line:\nValueError: 'Good' is not a valid Status\nShouldn't the code generated by the migration uses the name of the Status Enum 'GOOD', not the value of it, since it is changeable?\nIt should be:\n('status', models.CharField(default=Status['GOOD'], max_length=128))\nThis will be correct regardless of the translated word\n", - "golden_patch": "diff --git a/django/db/migrations/serializer.py b/django/db/migrations/serializer.py\n--- a/django/db/migrations/serializer.py\n+++ b/django/db/migrations/serializer.py\n@@ -120,9 +120,10 @@ class EnumSerializer(BaseSerializer):\n def serialize(self):\n enum_class = self.value.__class__\n module = enum_class.__module__\n- v_string, v_imports = serializer_factory(self.value.value).serialize()\n- imports = {'import %s' % module, *v_imports}\n- return \"%s.%s(%s)\" % (module, enum_class.__name__, v_string), imports\n+ return (\n+ '%s.%s[%r]' % (module, enum_class.__name__, self.value.name),\n+ {'import %s' % module},\n+ )\n \n \n class FloatSerializer(BaseSimpleSerializer):\n", - "expected_spans": { - "django/db/migrations/serializer.py": [ - "EnumSerializer", - "EnumSerializer.serialize" - ] - 
}, - "resolved_by": [ + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, { "name": "20240509_amazon-q-developer-agent-20240430-dev", - "patch": "--- a/django/db/migrations/serializer.py\n+++ b/django/db/migrations/serializer.py\n@@ -117,15 +117,14 @@\n \n \n class EnumSerializer(BaseSerializer):\n def serialize(self):\n enum_class = self.value.__class__\n module = enum_class.__module__\n- v_string, v_imports = serializer_factory(self.value.value).serialize()\n- imports = {'import %s' % module, *v_imports}\n- return \"%s.%s(%s)\" % (module, enum_class.__name__, v_string), imports\n+ imports = {'import %s' % module}\n+ return \"%s.%s['%s']\" % (module, enum_class.__name__, self.value.name), imports\n \n \n class FloatSerializer(BaseSimpleSerializer):\n def serialize(self):\n if math.isnan(self.value) or math.isinf(self.value):\n return 'float(\"{}\")'.format(self.value), set()\n" + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/db/migrations/serializer.py b/django/db/migrations/serializer.py\nindex 27b5cbd379..aa780b21a4 100644\n--- a/django/db/migrations/serializer.py\n+++ b/django/db/migrations/serializer.py\n@@ -120,9 +120,9 @@ class EnumSerializer(BaseSerializer):\n def serialize(self):\n enum_class = self.value.__class__\n module = enum_class.__module__\n- v_string, v_imports = serializer_factory(self.value.value).serialize()\n- imports = {'import %s' % module, *v_imports}\n- return \"%s.%s(%s)\" % (module, enum_class.__name__, v_string), imports\n+ enum_name = self.value.name\n+ imports = {'import %s' % module}\n+ return \"%s.%s['%s']\" % (module, enum_class.__name__, enum_name), imports\n \n \n class FloatSerializer(BaseSimpleSerializer):\n" + "name": "20240811_gru", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/django/db/migrations/serializer.py 
b/django/db/migrations/serializer.py\nindex 27b5cbd379..4622b0fcd8 100644\n--- a/django/db/migrations/serializer.py\n+++ b/django/db/migrations/serializer.py\n@@ -120,9 +120,9 @@ class EnumSerializer(BaseSerializer):\n def serialize(self):\n enum_class = self.value.__class__\n module = enum_class.__module__\n- v_string, v_imports = serializer_factory(self.value.value).serialize()\n+ v_string, v_imports = serializer_factory(self.value.name).serialize()\n imports = {'import %s' % module, *v_imports}\n- return \"%s.%s(%s)\" % (module, enum_class.__name__, v_string), imports\n+ return \"%s.%s[%s]\" % (module, enum_class.__name__, v_string), imports\n \n \n class FloatSerializer(BaseSimpleSerializer):\n" - } - ], - "alternative_spans": [ + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ], + "sympy/functions/elementary/tests/test_hyperbolic.py": [ + "test_complex_2899" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, { - "run_name": "20240509_amazon-q-developer-agent-20240430-dev", - "spans": { - "django/db/migrations/serializer.py": [ - "EnumSerializer.serialize" + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/hyperbolic.py": [ + "coth.eval" ] } } - ] + ], + "alternative_spans": [] }, { - "instance_id": "sympy__sympy-17655", + "instance_id": "sympy__sympy-13647", "repo": "sympy/sympy", - "base_commit": "f5e965947af2410ded92cfad987aaf45262ea434", - "problem_statement": "Unexpected exception when multiplying geometry.Point and number\n```python\r\nfrom sympy import geometry as ge\r\nimport sympy\r\n\r\npoint1 = ge.Point(0,0)\r\npoint2 = ge.Point(1,1)\r\n```\r\n\r\nThis line works fine\r\n```python\r\npoint1 + point2 * sympy.sympify(2.0)\r\n```\r\n\r\nBut when I write the same this way it raises an exception\r\n```python\r\npoint1 + sympy.sympify(2.0) * point2\r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n~/.virtualenvs/test/lib/python3.6/site-packages/sympy/geometry/point.py in __add__(self, other)\r\n 219 try:\r\n--> 220 s, o = Point._normalize_dimension(self, Point(other, evaluate=False))\r\n 221 except TypeError:\r\n\r\n~/.virtualenvs/test/lib/python3.6/site-packages/sympy/geometry/point.py in __new__(cls, *args, **kwargs)\r\n 128 Expecting sequence of coordinates, not `{}`'''\r\n--> 129 .format(func_name(coords))))\r\n 130 # A point where only `dim` is specified is initialized\r\n\r\nTypeError: \r\nExpecting sequence of coordinates, not `Mul`\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nGeometryError Traceback (most recent call last)\r\n in \r\n----> 1 point1 + sympy.sympify(2.0)* point2\r\n\r\n~/.virtualenvs/test/lib/python3.6/site-packages/sympy/geometry/point.py in __add__(self, other)\r\n 220 s, o = Point._normalize_dimension(self, Point(other, evaluate=False))\r\n 221 except TypeError:\r\n--> 222 raise GeometryError(\"Don't know how to add {} and a Point object\".format(other))\r\n 223 \r\n 224 coords = [simplify(a + 
b) for a, b in zip(s, o)]\r\n\r\nGeometryError: Don't know how to add 2.0*Point2D(1, 1) and a Point object\r\n```\r\n\r\nThe expected behaviour is, that both lines give the same result\n", - "golden_patch": "diff --git a/sympy/geometry/point.py b/sympy/geometry/point.py\n--- a/sympy/geometry/point.py\n+++ b/sympy/geometry/point.py\n@@ -278,6 +278,10 @@ def __mul__(self, factor):\n coords = [simplify(x*factor) for x in self.args]\n return Point(coords, evaluate=False)\n \n+ def __rmul__(self, factor):\n+ \"\"\"Multiply a factor by point's coordinates.\"\"\"\n+ return self.__mul__(factor)\n+\n def __neg__(self):\n \"\"\"Negate the point.\"\"\"\n coords = [-x for x in self.args]\n", + "base_commit": "67e3c956083d0128a621f65ee86a7dacd4f9f19f", + "problem_statement": "Matrix.col_insert() no longer seems to work correctly.\nExample:\r\n\r\n```\r\nIn [28]: import sympy as sm\r\n\r\nIn [29]: M = sm.eye(6)\r\n\r\nIn [30]: M\r\nOut[30]: \r\n\u23a11 0 0 0 0 0\u23a4\r\n\u23a2 \u23a5\r\n\u23a20 1 0 0 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 1 0 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 0 1 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 0 0 1 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a30 0 0 0 0 1\u23a6\r\n\r\nIn [31]: V = 2 * sm.ones(6, 2)\r\n\r\nIn [32]: V\r\nOut[32]: \r\n\u23a12 2\u23a4\r\n\u23a2 \u23a5\r\n\u23a22 2\u23a5\r\n\u23a2 \u23a5\r\n\u23a22 2\u23a5\r\n\u23a2 \u23a5\r\n\u23a22 2\u23a5\r\n\u23a2 \u23a5\r\n\u23a22 2\u23a5\r\n\u23a2 \u23a5\r\n\u23a32 2\u23a6\r\n\r\nIn [33]: M.col_insert(3, V)\r\nOut[33]: \r\n\u23a11 0 0 2 2 1 0 0\u23a4\r\n\u23a2 \u23a5\r\n\u23a20 1 0 2 2 0 1 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 1 2 2 0 0 1\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 0 2 2 0 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a20 0 0 2 2 0 0 0\u23a5\r\n\u23a2 \u23a5\r\n\u23a30 0 0 2 2 0 0 0\u23a6\r\nIn [34]: sm.__version__\r\nOut[34]: '1.1.1'\r\n```\r\n\r\nThe 3 x 3 identify matrix to the right of the columns of twos is shifted from the bottom three rows to the top three rows.\r\n\r\n@siefkenj Do you think this has to do with your matrix refactor?\n", + "golden_patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -86,7 +86,7 @@ def entry(i, j):\n return self[i, j]\n elif pos <= j < pos + other.cols:\n return other[i, j - pos]\n- return self[i, j - pos - other.cols]\n+ return self[i, j - other.cols]\n \n return self._new(self.rows, self.cols + other.cols,\n lambda i, j: entry(i, j))\n", + "test_patch": "diff --git a/sympy/matrices/tests/test_commonmatrix.py b/sympy/matrices/tests/test_commonmatrix.py\n--- a/sympy/matrices/tests/test_commonmatrix.py\n+++ b/sympy/matrices/tests/test_commonmatrix.py\n@@ -200,6 +200,14 @@ def test_col_insert():\n l = [0, 0, 0]\n l.insert(i, 4)\n assert flatten(zeros_Shaping(3).col_insert(i, c4).row(0).tolist()) == l\n+ # issue 13643\n+ assert eye_Shaping(6).col_insert(3, Matrix([[2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]])) == \\\n+ Matrix([[1, 0, 0, 2, 2, 0, 0, 0],\n+ [0, 1, 0, 2, 2, 0, 0, 0],\n+ [0, 0, 1, 2, 2, 0, 0, 0],\n+ [0, 0, 0, 2, 2, 1, 0, 0],\n+ [0, 0, 0, 2, 2, 0, 1, 0],\n+ [0, 0, 0, 2, 2, 0, 0, 1]])\n \n def test_extract():\n m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)\n", + "fail_to_pass": "[\"test_col_insert\"]", + "pass_to_pass": "[\"test__MinimalMatrix\", \"test_vec\", \"test_tolist\", \"test_row_col_del\", \"test_get_diag_blocks1\", \"test_get_diag_blocks2\", \"test_shape\", \"test_reshape\", \"test_row_col\", \"test_row_join\", \"test_col_join\", \"test_row_insert\", \"test_extract\", 
\"test_hstack\", \"test_vstack\", \"test_atoms\", \"test_free_symbols\", \"test_has\", \"test_is_anti_symmetric\", \"test_diagonal_symmetrical\", \"test_is_hermitian\", \"test_is_Identity\", \"test_is_symbolic\", \"test_is_upper\", \"test_is_lower\", \"test_is_square\", \"test_is_symmetric\", \"test_is_hessenberg\", \"test_is_zero\", \"test_values\", \"test_applyfunc\", \"test_adjoint\", \"test_as_real_imag\", \"test_conjugate\", \"test_doit\", \"test_evalf\", \"test_expand\", \"test_replace\", \"test_replace_map\", \"test_simplify\", \"test_subs\", \"test_trace\", \"test_xreplace\", \"test_permute\", \"test_abs\", \"test_add\", \"test_multiplication\", \"test_power\", \"test_neg\", \"test_sub\", \"test_div\", \"test_det\", \"test_adjugate\", \"test_cofactor_and_minors\", \"test_charpoly\", \"test_row_op\", \"test_col_op\", \"test_is_echelon\", \"test_echelon_form\", \"test_rref\", \"test_eye\", \"test_ones\", \"test_zeros\", \"test_diag\", \"test_jordan_block\", \"test_columnspace\", \"test_rowspace\", \"test_nullspace\", \"test_eigenvals\", \"test_eigenvects\", \"test_left_eigenvects\", \"test_diagonalize\", \"test_is_diagonalizable\", \"test_jordan_form\", \"test_singular_values\", \"test_integrate\"]", "expected_spans": { - "sympy/geometry/point.py": [ - "Point.__neg__" + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "test_file_spans": { + "sympy/matrices/tests/test_commonmatrix.py": [ + "test_col_insert" ] }, "resolved_by": [ { "name": "20240523_aider", - "patch": "diff --git a/sympy/geometry/point.py b/sympy/geometry/point.py\nindex 7c978c47be..62cf2518e4 100644\n--- a/sympy/geometry/point.py\n+++ b/sympy/geometry/point.py\n@@ -278,6 +278,12 @@ def __mul__(self, factor):\n coords = [simplify(x*factor) for x in self.args]\n return Point(coords, evaluate=False)\n \n+ def __rmul__(self, other):\n+ \"\"\"Right multiplication to handle scalar * Point.\"\"\"\n+ if isinstance(other, (int, float, sympify(0).func)):\n+ return self.__mul__(other)\n+ return NotImplemented\n+\n def __neg__(self):\n \"\"\"Negate the point.\"\"\"\n coords = [-x for x in self.args]\n" + "updated_spans": { + "sympy/assumptions/sathandlers.py": [ + "imports" + ], + "sympy/core/basic.py": [ + "imports" + ], + "sympy/core/containers.py": [ + "OrderedSet" + ], + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ], + "sympy/matrices/dense.py": [ + "imports", + "MutableDenseMatrix._new" + ], + "sympy/matrices/matrices.py": [ + "MatrixBase._handle_creation_inputs" + ], + "sympy/plotting/plot.py": [ + "imports" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sympy/geometry/point.py b/sympy/geometry/point.py\nindex 7c978c47be..6bc11bfaf1 100644\n--- a/sympy/geometry/point.py\n+++ b/sympy/geometry/point.py\n@@ -277,7 +277,19 @@ def __mul__(self, factor):\n factor = sympify(factor)\n coords = [simplify(x*factor) for x in self.args]\n return Point(coords, evaluate=False)\n+ def __rmul__(self, other):\n+ \"\"\"Right multiplication of Point by a factor.\n \n+ This method is called when a Point is multiplied by another factor\n+ from the right side, for example, `factor * Point`.\n+\n+ Returns\n+ =======\n+ Point\n+ The result of the multiplication.\n+\n+ \"\"\"\n+ return self.__mul__(other)\n def __neg__(self):\n \"\"\"Negate the point.\"\"\"\n coords = [-x for x in self.args]\n" + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + 
"sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/sympy/geometry/point.py b/sympy/geometry/point.py\nindex 7c978c47be..eed56bdc5d 100644\n--- a/sympy/geometry/point.py\n+++ b/sympy/geometry/point.py\n@@ -277,7 +277,24 @@ def __mul__(self, factor):\n factor = sympify(factor)\n coords = [simplify(x*factor) for x in self.args]\n return Point(coords, evaluate=False)\n+ def __rmul__(self, factor):\n+ \"\"\"Right multiplication of Point by a factor.\n \n+ This method is called when a Point is multiplied by a scalar\n+ on the right side of the multiplication operator.\n+\n+ Returns\n+ =======\n+\n+ point : Point\n+ The result of the multiplication.\n+\n+ See Also\n+ ========\n+\n+ sympy.geometry.point.Point.__mul__\n+ \"\"\"\n+ return self.__mul__(factor)\n def __neg__(self):\n \"\"\"Negate the point.\"\"\"\n coords = [-x for x in self.args]\n" - } - ], - "alternative_spans": [ - { - "run_name": "20240604_CodeR", - "spans": { - "sympy/geometry/point.py": [] + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_del", + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_del", + "MatrixShaping._eval_col_insert" + ] } }, { - "run_name": "20240612_IBM_Research_Agent101", - "spans": { - "sympy/geometry/point.py": [] + "name": "20240828_autose_mixed", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] } - } - ] - }, - { - "instance_id": "django__django-11848", - "repo": "django/django", - "base_commit": "f0adf3b9b7a19cdee05368ff0c0c2d087f011180", - "problem_statement": "django.utils.http.parse_http_date two digit year check is incorrect\nDescription\n\t \n\t\t(last modified by Ad Timmering)\n\t \nRFC 850 does not mention this, but in RFC 7231 (and there's something similar in RFC 2822), there's the following quote:\nRecipients of a timestamp value in rfc850-date format, which uses a\ntwo-digit year, MUST interpret a timestamp that appears to be more\nthan 50 years in the future as representing the most recent year in\nthe past that had the same last two digits.\nCurrent logic is hard coded to consider 0-69 to be in 2000-2069, and 70-99 to be 1970-1999, instead of comparing versus the current year.\n", - "golden_patch": "diff --git a/django/utils/http.py b/django/utils/http.py\n--- a/django/utils/http.py\n+++ b/django/utils/http.py\n@@ -176,10 +176,14 @@ def parse_http_date(date):\n try:\n year = int(m.group('year'))\n if year < 100:\n- if year < 70:\n- year += 2000\n+ current_year = datetime.datetime.utcnow().year\n+ current_century = current_year - (current_year % 100)\n+ if year - (current_year % 100) > 50:\n+ # year that appears to be more than 50 years in the future are\n+ # interpreted as representing the past.\n+ year += current_century - 100\n else:\n- year += 1900\n+ year += current_century\n month = MONTHS.index(m.group('mon').lower()) + 1\n day = int(m.group('day'))\n hour = int(m.group('hour'))\n", - "expected_spans": { - "django/utils/http.py": [ - "parse_http_date" - ] - }, - "resolved_by": [ + }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/django/utils/http.py b/django/utils/http.py\nindex 
572cfb4347..272cd5c937 100644\n--- a/django/utils/http.py\n+++ b/django/utils/http.py\n@@ -176,10 +176,11 @@ def parse_http_date(date):\n try:\n year = int(m.group('year'))\n if year < 100:\n- if year < 70:\n- year += 2000\n- else:\n- year += 1900\n+ current_year = datetime.datetime.utcnow().year\n+ current_century = current_year // 100 * 100\n+ year += current_century\n+ if year > current_year + 50:\n+ year -= 100\n month = MONTHS.index(m.group('mon').lower()) + 1\n day = int(m.group('day'))\n hour = int(m.group('hour'))\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "pydata__xarray-3364", - "repo": "pydata/xarray", - "base_commit": "863e49066ca4d61c9adfe62aca3bf21b90e1af8c", - "problem_statement": "Ignore missing variables when concatenating datasets?\nSeveral users (@raj-kesavan, @richardotis, now myself) have wondered about how to concatenate xray Datasets with different variables.\n\nWith the current `xray.concat`, you need to awkwardly create dummy variables filled with `NaN` in datasets that don't have them (or drop mismatched variables entirely). Neither of these are great options -- `concat` should have an option (the default?) to take care of this for the user.\n\nThis would also be more consistent with `pd.concat`, which takes a more relaxed approach to matching dataframes with different variables (it does an outer join).\n\n", - "golden_patch": "diff --git a/xarray/core/concat.py b/xarray/core/concat.py\n--- a/xarray/core/concat.py\n+++ b/xarray/core/concat.py\n@@ -312,15 +312,9 @@ def _dataset_concat(\n to_merge = {var: [] for var in variables_to_merge}\n \n for ds in datasets:\n- absent_merge_vars = variables_to_merge - set(ds.variables)\n- if absent_merge_vars:\n- raise ValueError(\n- \"variables %r are present in some datasets but not others. \"\n- % absent_merge_vars\n- )\n-\n for var in variables_to_merge:\n- to_merge[var].append(ds.variables[var])\n+ if var in ds:\n+ to_merge[var].append(ds.variables[var])\n \n for var in variables_to_merge:\n result_vars[var] = unique_variable(\n", - "expected_spans": { - "xarray/core/concat.py": [ - "_dataset_concat" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11905", - "repo": "django/django", - "base_commit": "2f72480fbd27896c986c45193e1603e35c0b19a7", - "problem_statement": "Prevent using __isnull lookup with non-boolean value.\nDescription\n\t \n\t\t(last modified by Mariusz Felisiak)\n\t \n__isnull should not allow for non-boolean values. Using truthy/falsey doesn't promote INNER JOIN to an OUTER JOIN but works fine for a simple queries. Using non-boolean values is \u200bundocumented and untested. 
IMO we should raise an error for non-boolean values to avoid confusion and for consistency.\n", - "golden_patch": "diff --git a/django/db/models/lookups.py b/django/db/models/lookups.py\n--- a/django/db/models/lookups.py\n+++ b/django/db/models/lookups.py\n@@ -1,5 +1,6 @@\n import itertools\n import math\n+import warnings\n from copy import copy\n \n from django.core.exceptions import EmptyResultSet\n@@ -9,6 +10,7 @@\n )\n from django.db.models.query_utils import RegisterLookupMixin\n from django.utils.datastructures import OrderedSet\n+from django.utils.deprecation import RemovedInDjango40Warning\n from django.utils.functional import cached_property\n \n \n@@ -463,6 +465,17 @@ class IsNull(BuiltinLookup):\n prepare_rhs = False\n \n def as_sql(self, compiler, connection):\n+ if not isinstance(self.rhs, bool):\n+ # When the deprecation ends, replace with:\n+ # raise ValueError(\n+ # 'The QuerySet value for an isnull lookup must be True or '\n+ # 'False.'\n+ # )\n+ warnings.warn(\n+ 'Using a non-boolean value for an isnull lookup is '\n+ 'deprecated, use True or False instead.',\n+ RemovedInDjango40Warning,\n+ )\n sql, params = compiler.compile(self.lhs)\n if self.rhs:\n return \"%s IS NULL\" % sql, params\n", - "expected_spans": { - "django/db/models/lookups.py": [ - "imports", - "IsNull.as_sql" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11910", - "repo": "django/django", - "base_commit": "d232fd76a85870daf345fd8f8d617fe7802ae194", - "problem_statement": "ForeignKey's to_field parameter gets the old field's name when renaming a PrimaryKey.\nDescription\n\t\nHaving these two models \nclass ModelA(models.Model):\n\tfield_wrong = models.CharField('field1', max_length=50, primary_key=True) # I'm a Primary key.\nclass ModelB(models.Model):\n\tfield_fk = models.ForeignKey(ModelA, blank=True, null=True, on_delete=models.CASCADE) \n... migrations applyed ...\nthe ModelA.field_wrong field has been renamed ... and Django recognizes the \"renaming\"\n# Primary key renamed\nclass ModelA(models.Model):\n\tfield_fixed = models.CharField('field1', max_length=50, primary_key=True) # I'm a Primary key.\nAttempts to to_field parameter. 
\nThe to_field points to the old_name (field_typo) and not to the new one (\"field_fixed\")\nclass Migration(migrations.Migration):\n\tdependencies = [\n\t\t('app1', '0001_initial'),\n\t]\n\toperations = [\n\t\tmigrations.RenameField(\n\t\t\tmodel_name='modela',\n\t\t\told_name='field_wrong',\n\t\t\tnew_name='field_fixed',\n\t\t),\n\t\tmigrations.AlterField(\n\t\t\tmodel_name='modelb',\n\t\t\tname='modela',\n\t\t\tfield=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app1.ModelB', to_field='field_wrong'),\n\t\t),\n\t]\n", - "golden_patch": "diff --git a/django/db/migrations/autodetector.py b/django/db/migrations/autodetector.py\n--- a/django/db/migrations/autodetector.py\n+++ b/django/db/migrations/autodetector.py\n@@ -927,6 +927,10 @@ def generate_altered_fields(self):\n if remote_field_name:\n to_field_rename_key = rename_key + (remote_field_name,)\n if to_field_rename_key in self.renamed_fields:\n+ # Repoint both model and field name because to_field\n+ # inclusion in ForeignKey.deconstruct() is based on\n+ # both.\n+ new_field.remote_field.model = old_field.remote_field.model\n new_field.remote_field.field_name = old_field.remote_field.field_name\n # Handle ForeignObjects which can have multiple from_fields/to_fields.\n from_fields = getattr(new_field, 'from_fields', None)\n", - "expected_spans": { - "django/db/migrations/autodetector.py": [ - "MigrationAutodetector.generate_altered_fields" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11964", - "repo": "django/django", - "base_commit": "fc2b1cc926e34041953738e58fa6ad3053059b22", - "problem_statement": "The value of a TextChoices/IntegerChoices field has a differing type\nDescription\n\t\nIf we create an instance of a model having a CharField or IntegerField with the keyword choices pointing to IntegerChoices or TextChoices, the value returned by the getter of the field will be of the same type as the one created by enum.Enum (enum value).\nFor example, this model:\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nclass MyChoice(models.TextChoices):\n\tFIRST_CHOICE = \"first\", _(\"The first choice, it is\")\n\tSECOND_CHOICE = \"second\", _(\"The second choice, it is\")\nclass MyObject(models.Model):\n\tmy_str_value = models.CharField(max_length=10, choices=MyChoice.choices)\nThen this test:\nfrom django.test import TestCase\nfrom testing.pkg.models import MyObject, MyChoice\nclass EnumTest(TestCase):\n\tdef setUp(self) -> None:\n\t\tself.my_object = MyObject.objects.create(my_str_value=MyChoice.FIRST_CHOICE)\n\tdef test_created_object_is_str(self):\n\t\tmy_object = self.my_object\n\t\tself.assertIsInstance(my_object.my_str_value, str)\n\t\tself.assertEqual(str(my_object.my_str_value), \"first\")\n\tdef test_retrieved_object_is_str(self):\n\t\tmy_object = MyObject.objects.last()\n\t\tself.assertIsInstance(my_object.my_str_value, str)\n\t\tself.assertEqual(str(my_object.my_str_value), \"first\")\nAnd then the results:\n(django30-venv) \u279c django30 ./manage.py test\nCreating test database for alias 'default'...\nSystem check identified no issues (0 silenced).\nF.\n======================================================================\nFAIL: test_created_object_is_str (testing.tests.EnumTest)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/Users/mikailkocak/Development/django30/testing/tests.py\", line 14, in 
test_created_object_is_str\n\tself.assertEqual(str(my_object.my_str_value), \"first\")\nAssertionError: 'MyChoice.FIRST_CHOICE' != 'first'\n- MyChoice.FIRST_CHOICE\n+ first\n----------------------------------------------------------------------\nRan 2 tests in 0.002s\nFAILED (failures=1)\nWe notice when invoking __str__(...) we don't actually get the value property of the enum value which can lead to some unexpected issues, especially when communicating to an external API with a freshly created instance that will send MyEnum.MyValue, and the one that was retrieved would send my_value.\n", - "golden_patch": "diff --git a/django/db/models/enums.py b/django/db/models/enums.py\n--- a/django/db/models/enums.py\n+++ b/django/db/models/enums.py\n@@ -60,7 +60,13 @@ def values(cls):\n \n class Choices(enum.Enum, metaclass=ChoicesMeta):\n \"\"\"Class for creating enumerated choices.\"\"\"\n- pass\n+\n+ def __str__(self):\n+ \"\"\"\n+ Use value when cast to str, so that Choices set as model instance\n+ attributes are rendered as expected in templates and similar contexts.\n+ \"\"\"\n+ return str(self.value)\n \n \n class IntegerChoices(int, Choices):\n", - "expected_spans": { - "django/db/models/enums.py": [ - "Choices" - ] - }, - "resolved_by": [ + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/django/db/models/enums.py b/django/db/models/enums.py\nindex bbe362a6ab..e462842a93 100644\n--- a/django/db/models/enums.py\n+++ b/django/db/models/enums.py\n@@ -60,7 +60,9 @@ class ChoicesMeta(enum.EnumMeta):\n \n class Choices(enum.Enum, metaclass=ChoicesMeta):\n \"\"\"Class for creating enumerated choices.\"\"\"\n- pass\n+ \n+ def __str__(self):\n+ return str(self.value)\n \n \n class IntegerChoices(int, Choices):\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "django__django-11999", - "repo": "django/django", - "base_commit": "84633905273fc916e3d17883810d9969c03f73c2", - "problem_statement": "Cannot override get_FOO_display() in Django 2.2+.\nDescription\n\t\nI cannot override the get_FIELD_display function on models since version 2.2. 
It works in version 2.1.\nExample:\nclass FooBar(models.Model):\n\tfoo_bar = models.CharField(_(\"foo\"), choices=[(1, 'foo'), (2, 'bar')])\n\tdef __str__(self):\n\t\treturn self.get_foo_bar_display() # This returns 'foo' or 'bar' in 2.2, but 'something' in 2.1\n\tdef get_foo_bar_display(self):\n\t\treturn \"something\"\nWhat I expect is that I should be able to override this function.\n", - "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -763,8 +763,12 @@ def contribute_to_class(self, cls, name, private_only=False):\n if not getattr(cls, self.attname, None):\n setattr(cls, self.attname, self.descriptor_class(self))\n if self.choices is not None:\n- setattr(cls, 'get_%s_display' % self.name,\n- partialmethod(cls._get_FIELD_display, field=self))\n+ if not hasattr(cls, 'get_%s_display' % self.name):\n+ setattr(\n+ cls,\n+ 'get_%s_display' % self.name,\n+ partialmethod(cls._get_FIELD_display, field=self),\n+ )\n \n def get_filter_kwargs_for_object(self, obj):\n \"\"\"\n", - "expected_spans": { - "django/db/models/fields/__init__.py": [ - "Field.contribute_to_class" - ] - }, - "resolved_by": [ + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -762,8 +762,10 @@\n # such fields can't be deferred (we don't have a check for this).\n if not getattr(cls, self.attname, None):\n setattr(cls, self.attname, self.descriptor_class(self))\n- if self.choices is not None:\n- setattr(cls, 'get_%s_display' % self.name,\n+ # Prioritize custom `get_%s_display` if defined in the model, allowing for overrides.\n+ # Ensure that the field's choices are still accessible for custom implementations.\n+ if self.choices is not None and not hasattr(cls, 'get_%s_display' % self.name):\n+ setattr(cls, 'get_%s_display' % self.name, \n partialmethod(cls._get_FIELD_display, field=self))\n \n def get_filter_kwargs_for_object(self, obj):\n" + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } }, { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\nindex ee45bb941e..a4acddd9d4 100644\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -763,8 +763,11 @@ class Field(RegisterLookupMixin):\n if not getattr(cls, self.attname, None):\n setattr(cls, self.attname, self.descriptor_class(self))\n if self.choices is not None:\n- setattr(cls, 'get_%s_display' % self.name,\n- partialmethod(cls._get_FIELD_display, field=self))\n+ # Check if a get_FOO_display method already exists before setting it\n+ display_method_name = 'get_%s_display' % self.name\n+ if not hasattr(cls, display_method_name):\n+ setattr(cls, display_method_name,\n+ partialmethod(cls._get_FIELD_display, field=self))\n \n def get_filter_kwargs_for_object(self, obj):\n \"\"\"\n" + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - 
"patch": "\ndiff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\nindex ee45bb941e..06a296534b 100644\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -763,8 +763,8 @@ class Field(RegisterLookupMixin):\n if not getattr(cls, self.attname, None):\n setattr(cls, self.attname, self.descriptor_class(self))\n if self.choices is not None:\n- setattr(cls, 'get_%s_display' % self.name,\n- partialmethod(cls._get_FIELD_display, field=self))\n+ if not hasattr(cls, 'get_%s_display' % self.name):\n+ setattr(cls, 'get_%s_display' % self.name, partialmethod(cls._get_FIELD_display, field=self))\n \n def get_filter_kwargs_for_object(self, obj):\n \"\"\"\ndiff --git a/reproduce_issue.py b/reproduce_issue.py\nnew file mode 100644\nindex 0000000000..1501e38a49\n--- /dev/null\n+++ b/reproduce_issue.py\n@@ -0,0 +1,43 @@\n+import django\n+from django.conf import settings\n+\n+settings.configure(\n+ INSTALLED_APPS=[\n+ 'django.contrib.auth',\n+ 'django.contrib.contenttypes',\n+ '__main__', # Allows us to define models in the main script\n+ ],\n+ DATABASES={\n+ 'default': {\n+ 'ENGINE': 'django.db.backends.sqlite3',\n+ 'NAME': ':memory:',\n+ }\n+ },\n+ USE_I18N=True,\n+ USE_L10N=True,\n+ USE_TZ=True,\n+ SECRET_KEY='fake-key-for-testing',\n+)\n+\n+django.setup()\n+\n+from django.db import models\n+from django.utils.translation import gettext_lazy as _\n+\n+class FooBar(models.Model):\n+ foo_bar = models.CharField(_(\"foo\"), choices=[(1, 'foo'), (2, 'bar')])\n+\n+ def __str__(self):\n+ return self.get_foo_bar_display()\n+\n+ def get_foo_bar_display(self):\n+ return \"something\"\n+\n+# Create an instance of FooBar and set foo_bar to 1 ('foo')\n+foo_bar_instance = FooBar(foo_bar=1)\n+\n+# Print the result of __str__ method, which should call the overridden get_foo_bar_display\n+print(foo_bar_instance.__str__())\n+\n+# Add a line to indicate the script completed successfully\n+print(\"Script completed successfully, no errors.\")\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "pytest-dev__pytest-6116", - "repo": "pytest-dev/pytest", - "base_commit": "e670ff76cbad80108bde9bab616b66771b8653cf", - "problem_statement": "pytest --collect-only needs a one char shortcut command\nI find myself needing to run `--collect-only` very often and that cli argument is a very long to type one. \r\n\r\nI do think that it would be great to allocate a character for it, not sure which one yet. Please use up/down thumbs to vote if you would find it useful or not and eventually proposing which char should be used. \r\n\r\nClearly this is a change very easy to implement but first I want to see if others would find it useful or not.\npytest --collect-only needs a one char shortcut command\nI find myself needing to run `--collect-only` very often and that cli argument is a very long to type one. \r\n\r\nI do think that it would be great to allocate a character for it, not sure which one yet. Please use up/down thumbs to vote if you would find it useful or not and eventually proposing which char should be used. 
\r\n\r\nClearly this is a change very easy to implement but first I want to see if others would find it useful or not.\n", - "golden_patch": "diff --git a/src/_pytest/main.py b/src/_pytest/main.py\n--- a/src/_pytest/main.py\n+++ b/src/_pytest/main.py\n@@ -109,6 +109,7 @@ def pytest_addoption(parser):\n group.addoption(\n \"--collectonly\",\n \"--collect-only\",\n+ \"--co\",\n action=\"store_true\",\n help=\"only collect tests, don't execute them.\",\n ),\n", - "expected_spans": { - "src/_pytest/main.py": [ - "pytest_addoption" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-15512", - "repo": "scikit-learn/scikit-learn", - "base_commit": "b8a4da8baa1137f173e7035f104067c7d2ffde22", - "problem_statement": "Return values of non converged affinity propagation clustering\nThe affinity propagation Documentation states: \r\n\"When the algorithm does not converge, it returns an empty array as cluster_center_indices and -1 as label for each training sample.\"\r\n\r\nExample:\r\n```python\r\nfrom sklearn.cluster import AffinityPropagation\r\nimport pandas as pd\r\n\r\ndata = pd.DataFrame([[1,0,0,0,0,0],[0,1,1,1,0,0],[0,0,1,0,0,1]])\r\naf = AffinityPropagation(affinity='euclidean', verbose=True, copy=False, max_iter=2).fit(data)\r\n\r\nprint(af.cluster_centers_indices_)\r\nprint(af.labels_)\r\n\r\n```\r\nI would expect that the clustering here (which does not converge) prints first an empty List and then [-1,-1,-1], however, I get [2] as cluster center and [0,0,0] as cluster labels. \r\nThe only way I currently know if the clustering fails is if I use the verbose option, however that is very unhandy. A hacky solution is to check if max_iter == n_iter_ but it could have converged exactly 15 iterations before max_iter (although unlikely).\r\nI am not sure if this is intended behavior and the documentation is wrong?\r\n\r\nFor my use-case within a bigger script, I would prefer to get back -1 values or have a property to check if it has converged, as otherwise, a user might not be aware that the clustering never converged.\r\n\r\n\r\n#### Versions\r\nSystem:\r\n python: 3.6.7 | packaged by conda-forge | (default, Nov 21 2018, 02:32:25) [GCC 4.8.2 20140120 (Red Hat 4.8.2-15)]\r\nexecutable: /home/jenniferh/Programs/anaconda3/envs/TF_RDKit_1_19/bin/python\r\n machine: Linux-4.15.0-52-generic-x86_64-with-debian-stretch-sid\r\nBLAS:\r\n macros: SCIPY_MKL_H=None, HAVE_CBLAS=None\r\n lib_dirs: /home/jenniferh/Programs/anaconda3/envs/TF_RDKit_1_19/lib\r\ncblas_libs: mkl_rt, pthread\r\nPython deps:\r\n pip: 18.1\r\n setuptools: 40.6.3\r\n sklearn: 0.20.3\r\n numpy: 1.15.4\r\n scipy: 1.2.0\r\n Cython: 0.29.2\r\n pandas: 0.23.4\r\n\r\n\n", - "golden_patch": "diff --git a/sklearn/cluster/_affinity_propagation.py b/sklearn/cluster/_affinity_propagation.py\n--- a/sklearn/cluster/_affinity_propagation.py\n+++ b/sklearn/cluster/_affinity_propagation.py\n@@ -194,17 +194,19 @@ def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,\n unconverged = (np.sum((se == convergence_iter) + (se == 0))\n != n_samples)\n if (not unconverged and (K > 0)) or (it == max_iter):\n+ never_converged = False\n if verbose:\n print(\"Converged after %d iterations.\" % it)\n break\n else:\n+ never_converged = True\n if verbose:\n print(\"Did not converge\")\n \n I = np.flatnonzero(E)\n K = I.size # Identify exemplars\n \n- if K > 0:\n+ if K > 0 and not never_converged:\n c = np.argmax(S[:, I], axis=1)\n c[I] = np.arange(K) # Identify clusters\n # Refine the 
final set of exemplars and clusters and return results\n@@ -408,6 +410,7 @@ def predict(self, X):\n Cluster labels.\n \"\"\"\n check_is_fitted(self)\n+ X = check_array(X)\n if not hasattr(self, \"cluster_centers_\"):\n raise ValueError(\"Predict method is not supported when \"\n \"affinity='precomputed'.\")\n", - "expected_spans": { - "sklearn/cluster/_affinity_propagation.py": [ - "affinity_propagation", - "AffinityPropagation.predict" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "scikit-learn__scikit-learn-15535", - "repo": "scikit-learn/scikit-learn", - "base_commit": "70b0ddea992c01df1a41588fa9e2d130fb6b13f8", - "problem_statement": "regression in input validation of clustering metrics\n```python\r\nfrom sklearn.metrics.cluster import mutual_info_score\r\nimport numpy as np\r\n\r\nx = np.random.choice(['a', 'b'], size=20).astype(object)\r\nmutual_info_score(x, x)\r\n```\r\nValueError: could not convert string to float: 'b'\r\n\r\nwhile\r\n```python\r\nx = np.random.choice(['a', 'b'], size=20)\r\nmutual_info_score(x, x)\r\n```\r\nworks with a warning?\r\n\r\nthis worked in 0.21.1 without a warning (as I think it should)\r\n\r\n\r\nEdit by @ogrisel: I removed the `.astype(object)` in the second code snippet.\n", - "golden_patch": "diff --git a/sklearn/metrics/cluster/_supervised.py b/sklearn/metrics/cluster/_supervised.py\n--- a/sklearn/metrics/cluster/_supervised.py\n+++ b/sklearn/metrics/cluster/_supervised.py\n@@ -43,10 +43,10 @@ def check_clusterings(labels_true, labels_pred):\n The predicted labels.\n \"\"\"\n labels_true = check_array(\n- labels_true, ensure_2d=False, ensure_min_samples=0\n+ labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None,\n )\n labels_pred = check_array(\n- labels_pred, ensure_2d=False, ensure_min_samples=0\n+ labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None,\n )\n \n # input checks\n", - "expected_spans": { - "sklearn/metrics/cluster/_supervised.py": [ - "check_clusterings" - ] - }, - "resolved_by": [ + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, { - "name": "20240402_sweagent_claude3opus", - "patch": "diff --git a/reproduce.py b/reproduce.py\nnew file mode 100644\nindex 000000000..7bca5ad21\n--- /dev/null\n+++ b/reproduce.py\n@@ -0,0 +1,5 @@\n+from sklearn.metrics.cluster import mutual_info_score\n+import numpy as np\n+\n+x = np.random.choice(['a', 'b'], size=20)\n+mutual_info_score(x, x)\ndiff --git a/sklearn/metrics/cluster/_supervised.py b/sklearn/metrics/cluster/_supervised.py\nindex 4bddae35c..869237f56 100644\n--- a/sklearn/metrics/cluster/_supervised.py\n+++ b/sklearn/metrics/cluster/_supervised.py\n@@ -43,10 +43,10 @@ def check_clusterings(labels_true, labels_pred):\n The predicted labels.\n \"\"\"\n labels_true = check_array(\n- labels_true, ensure_2d=False, ensure_min_samples=0\n+ labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n labels_pred = check_array(\n- labels_pred, ensure_2d=False, ensure_min_samples=0\n+ labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n \n # input checks\n" + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/matrices/common.py": [ + "docstring", + "imports", + "MatrixRequired._new", + "MatrixRequired.__eq__", + "MatrixRequired.__getitem__", + "MatrixRequired.__len__", + "MatrixShaping", + 
"MatrixShaping._eval_col_del", + "MatrixShaping._eval_col_insert", + "MatrixShaping._eval_col_join", + "MatrixShaping._eval_extract", + "MatrixShaping._eval_get_diag_blocks", + "MatrixShaping._eval_row_del", + "MatrixShaping._eval_row_join", + "MatrixShaping._eval_tolist", + "MatrixShaping._eval_vec", + "MatrixShaping.col_del", + "MatrixShaping.col_insert", + "MatrixShaping.col_join", + "MatrixShaping.extract", + "MatrixShaping.hstack", + "MatrixShaping.reshape", + "MatrixShaping.row_del", + "MatrixShaping.row_insert", + "MatrixShaping.row_join", + "MatrixShaping.shape", + "MatrixShaping.vstack", + "MatrixSpecial._eval_diag", + "MatrixSpecial._eval_eye", + "MatrixSpecial._eval_jordan_block", + "MatrixSpecial._eval_ones", + "MatrixSpecial._eval_zeros", + "MatrixSpecial.diag", + "MatrixSpecial.eye", + "MatrixSpecial.jordan_block", + "MatrixSpecial.ones", + "MatrixSpecial.zeros", + "MatrixProperties._eval_is_anti_symmetric", + "MatrixProperties._eval_is_diagonal", + "MatrixProperties._eval_is_matrix_hermitian", + "MatrixProperties._eval_is_Identity", + "MatrixProperties._eval_is_lower_hessenberg", + "MatrixProperties._eval_is_lower", + "MatrixProperties._eval_is_symbolic", + "MatrixProperties._eval_is_symmetric", + "MatrixProperties._eval_is_upper_hessenberg", + "MatrixProperties.atoms", + "MatrixProperties.is_anti_symmetric", + "MatrixProperties.is_hermitian", + "MatrixProperties.is_lower_hessenberg", + "MatrixProperties.is_symmetric", + "MatrixProperties.is_upper", + "MatrixOperations._eval_as_real_imag", + "MatrixOperations._eval_conjugate", + "MatrixOperations._eval_permute_cols", + "MatrixOperations._eval_permute_rows", + "MatrixOperations.applyfunc", + "MatrixOperations.expand", + "MatrixOperations.permute", + "MatrixOperations.subs", + "MatrixOperations:3", + "MatrixOperations.xreplace", + "MatrixArithmetic", + "MatrixArithmetic._eval_Abs", + "MatrixArithmetic._eval_add", + "MatrixArithmetic._eval_matrix_mul", + "MatrixArithmetic._eval_matrix_mul_elementwise", + "MatrixArithmetic._eval_matrix_rmul", + "MatrixArithmetic._eval_scalar_mul", + "MatrixArithmetic._eval_scalar_rmul", + "MatrixArithmetic.__add__", + "MatrixArithmetic.__mul__", + "MatrixArithmetic.__pow__", + "MatrixArithmetic.__rmul__", + "MatrixArithmetic.__rsub__", + "MatrixArithmetic.__sub__", + "MatrixArithmetic.multiply_elementwise", + "MatrixCommon", + "_MinimalMatrix", + "_MinimalMatrix.__init__", + "_MinimalMatrix.__getitem__", + "_MinimalMatrix.__len__", + "_MinimalMatrix.__repr__", + "_MinimalMatrix.shape", + "a2idx", + "classof" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "docstring", + "imports", + "MatrixRequired._new", + "MatrixRequired.__eq__", + "MatrixRequired.__getitem__", + "MatrixRequired.__len__", + "MatrixShaping", + "MatrixShaping._eval_col_del", + "MatrixShaping._eval_col_insert", + "MatrixShaping._eval_col_join", + "MatrixShaping._eval_extract", + "MatrixShaping._eval_get_diag_blocks", + "MatrixShaping._eval_row_del", + "MatrixShaping._eval_row_join", + "MatrixShaping._eval_tolist", + "MatrixShaping._eval_vec", + "MatrixShaping.col_del", + "MatrixShaping.col_insert", + "MatrixShaping.col_join", + "MatrixShaping.extract", + "MatrixShaping.hstack", + "MatrixShaping.reshape", + "MatrixShaping.row_del", + "MatrixShaping.row_insert", + "MatrixShaping.row_join", + "MatrixShaping.shape", + "MatrixShaping.vstack", + "MatrixSpecial._eval_diag", + "MatrixSpecial._eval_eye", + "MatrixSpecial._eval_jordan_block", + "MatrixSpecial._eval_ones", + "MatrixSpecial._eval_zeros", + 
"MatrixSpecial.diag", + "MatrixSpecial.eye", + "MatrixSpecial.jordan_block", + "MatrixSpecial.ones", + "MatrixSpecial.zeros", + "MatrixProperties._eval_is_anti_symmetric", + "MatrixProperties._eval_is_diagonal", + "MatrixProperties._eval_is_matrix_hermitian", + "MatrixProperties._eval_is_Identity", + "MatrixProperties._eval_is_lower_hessenberg", + "MatrixProperties._eval_is_lower", + "MatrixProperties._eval_is_symbolic", + "MatrixProperties._eval_is_symmetric", + "MatrixProperties._eval_is_upper_hessenberg", + "MatrixProperties.atoms", + "MatrixProperties.is_anti_symmetric", + "MatrixProperties.is_hermitian", + "MatrixProperties.is_lower_hessenberg", + "MatrixProperties.is_symmetric", + "MatrixProperties.is_upper", + "MatrixOperations._eval_as_real_imag", + "MatrixOperations._eval_conjugate", + "MatrixOperations._eval_permute_cols", + "MatrixOperations._eval_permute_rows", + "MatrixOperations.applyfunc", + "MatrixOperations.expand", + "MatrixOperations.permute", + "MatrixOperations.subs", + "MatrixOperations:3", + "MatrixOperations.xreplace", + "MatrixArithmetic", + "MatrixArithmetic._eval_Abs", + "MatrixArithmetic._eval_add", + "MatrixArithmetic._eval_matrix_mul", + "MatrixArithmetic._eval_matrix_mul_elementwise", + "MatrixArithmetic._eval_matrix_rmul", + "MatrixArithmetic._eval_scalar_mul", + "MatrixArithmetic._eval_scalar_rmul", + "MatrixArithmetic.__add__", + "MatrixArithmetic.__mul__", + "MatrixArithmetic.__pow__", + "MatrixArithmetic.__rmul__", + "MatrixArithmetic.__rsub__", + "MatrixArithmetic.__sub__", + "MatrixArithmetic.multiply_elementwise", + "MatrixCommon", + "_MinimalMatrix", + "_MinimalMatrix.__init__", + "_MinimalMatrix.__getitem__", + "_MinimalMatrix.__len__", + "_MinimalMatrix.__repr__", + "_MinimalMatrix.shape", + "a2idx", + "classof" + ] + } }, { - "name": "20240402_sweagent_gpt4", - "patch": "diff --git a/sklearn/metrics/cluster/_supervised.py b/sklearn/metrics/cluster/_supervised.py\nindex 4bddae35c..869237f56 100644\n--- a/sklearn/metrics/cluster/_supervised.py\n+++ b/sklearn/metrics/cluster/_supervised.py\n@@ -43,10 +43,10 @@ def check_clusterings(labels_true, labels_pred):\n The predicted labels.\n \"\"\"\n labels_true = check_array(\n- labels_true, ensure_2d=False, ensure_min_samples=0\n+ labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n labels_pred = check_array(\n- labels_pred, ensure_2d=False, ensure_min_samples=0\n+ labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n \n # input checks\n" + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ], + "sympy/matrices/tests/test_matrices.py": [ + "test_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } }, { - "name": "20240523_aider", - "patch": "diff --git a/sklearn/metrics/cluster/_supervised.py b/sklearn/metrics/cluster/_supervised.py\nindex 4bddae35c..f96e2c9b2 100644\n--- a/sklearn/metrics/cluster/_supervised.py\n+++ b/sklearn/metrics/cluster/_supervised.py\n@@ -43,12 +43,17 @@ def check_clusterings(labels_true, labels_pred):\n The predicted labels.\n \"\"\"\n labels_true = check_array(\n- labels_true, ensure_2d=False, ensure_min_samples=0\n+ labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n labels_pred = check_array(\n- labels_pred, ensure_2d=False, ensure_min_samples=0\n+ labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n \n+ if labels_true.dtype == object:\n+ labels_true = labels_true.astype(str)\n+ if 
labels_pred.dtype == object:\n+ labels_pred = labels_pred.astype(str)\n+\n # input checks\n if labels_true.ndim != 1:\n raise ValueError(\n" + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_del", + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_del", + "MatrixShaping._eval_col_insert" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sklearn/metrics/cluster/_supervised.py b/sklearn/metrics/cluster/_supervised.py\nindex 4bddae35c..869237f56 100644\n--- a/sklearn/metrics/cluster/_supervised.py\n+++ b/sklearn/metrics/cluster/_supervised.py\n@@ -43,10 +43,10 @@ def check_clusterings(labels_true, labels_pred):\n The predicted labels.\n \"\"\"\n labels_true = check_array(\n- labels_true, ensure_2d=False, ensure_min_samples=0\n+ labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n labels_pred = check_array(\n- labels_pred, ensure_2d=False, ensure_min_samples=0\n+ labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n \n # input checks\n" + "name": "20240829_Isoform", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } }, { "name": "20240604_CodeR", - "patch": "\ndiff --git a/output.txt b/output.txt\nnew file mode 100644\nindex 000000000..7063987e5\n--- /dev/null\n+++ b/output.txt\n@@ -0,0 +1,2 @@\n+Attempting to calculate mutual_info_score with object type array:\n+\\nAttempting to calculate mutual_info_score with default type array:\ndiff --git a/sklearn/metrics/cluster/_supervised.py b/sklearn/metrics/cluster/_supervised.py\nindex 4bddae35c..869237f56 100644\n--- a/sklearn/metrics/cluster/_supervised.py\n+++ b/sklearn/metrics/cluster/_supervised.py\n@@ -43,10 +43,10 @@ def check_clusterings(labels_true, labels_pred):\n The predicted labels.\n \"\"\"\n labels_true = check_array(\n- labels_true, ensure_2d=False, ensure_min_samples=0\n+ labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n labels_pred = check_array(\n- labels_pred, ensure_2d=False, ensure_min_samples=0\n+ labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None\n )\n \n # input checks\ndiff --git a/sklearn/utils/validation.py b/sklearn/utils/validation.py\nindex dad56850f..ff0c58aba 100644\n--- a/sklearn/utils/validation.py\n+++ b/sklearn/utils/validation.py\n@@ -551,7 +551,8 @@ def check_array(array, accept_sparse=False, accept_large_sparse=True,\n \n # make sure we actually converted to numeric:\n if dtype_numeric and array.dtype.kind == \"O\":\n- array = array.astype(np.float64)\n+ if not all(isinstance(item, str) for item in np.asarray(array).ravel()):\n+ array = array.astype(np.float64)\n if not allow_nd and array.ndim >= 3:\n raise ValueError(\"Found array with dim %d. 
%s expected <= 2.\"\n % (array.ndim, estimator_name))\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "django__django-12113", - "repo": "django/django", - "base_commit": "62254c5202e80a68f4fe6572a2be46a3d953de1a", - "problem_statement": "admin_views.test_multidb fails with persistent test SQLite database.\nDescription\n\t \n\t\t(last modified by Mariusz Felisiak)\n\t \nI've tried using persistent SQLite databases for the tests (to make use of\n--keepdb), but at least some test fails with:\nsqlite3.OperationalError: database is locked\nThis is not an issue when only using TEST[\"NAME\"] with \"default\" (which is good enough in terms of performance).\ndiff --git i/tests/test_sqlite.py w/tests/test_sqlite.py\nindex f1b65f7d01..9ce4e32e14 100644\n--- i/tests/test_sqlite.py\n+++ w/tests/test_sqlite.py\n@@ -15,9 +15,15 @@\n DATABASES = {\n\t 'default': {\n\t\t 'ENGINE': 'django.db.backends.sqlite3',\n+\t\t'TEST': {\n+\t\t\t'NAME': 'test_default.sqlite3'\n+\t\t},\n\t },\n\t 'other': {\n\t\t 'ENGINE': 'django.db.backends.sqlite3',\n+\t\t'TEST': {\n+\t\t\t'NAME': 'test_other.sqlite3'\n+\t\t},\n\t }\n }\n% tests/runtests.py admin_views.test_multidb -v 3 --keepdb --parallel 1\n\u2026\nOperations to perform:\n Synchronize unmigrated apps: admin_views, auth, contenttypes, messages, sessions, staticfiles\n Apply all migrations: admin, sites\nRunning pre-migrate handlers for application contenttypes\nRunning pre-migrate handlers for application auth\nRunning pre-migrate handlers for application sites\nRunning pre-migrate handlers for application sessions\nRunning pre-migrate handlers for application admin\nRunning pre-migrate handlers for application admin_views\nSynchronizing apps without migrations:\n Creating tables...\n\tRunning deferred SQL...\nRunning migrations:\n No migrations to apply.\nRunning post-migrate handlers for application contenttypes\nRunning post-migrate handlers for application auth\nRunning post-migrate handlers for application sites\nRunning post-migrate handlers for application sessions\nRunning post-migrate handlers for application admin\nRunning post-migrate handlers for application admin_views\nSystem check identified no issues (0 silenced).\nERROR\n======================================================================\nERROR: setUpClass (admin_views.test_multidb.MultiDatabaseTests)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 84, in _execute\n\treturn self.cursor.execute(sql, params)\n File \"\u2026/Vcs/django/django/db/backends/sqlite3/base.py\", line 391, in execute\n\treturn Database.Cursor.execute(self, query, params)\nsqlite3.OperationalError: database is locked\nThe above exception was the direct cause of the following exception:\nTraceback (most recent call last):\n File \"\u2026/Vcs/django/django/test/testcases.py\", line 1137, in setUpClass\n\tcls.setUpTestData()\n File \"\u2026/Vcs/django/tests/admin_views/test_multidb.py\", line 40, in setUpTestData\n\tusername='admin', password='something', email='test@test.org',\n File \"\u2026/Vcs/django/django/contrib/auth/models.py\", line 158, in create_superuser\n\treturn self._create_user(username, email, password, **extra_fields)\n File \"\u2026/Vcs/django/django/contrib/auth/models.py\", line 141, in _create_user\n\tuser.save(using=self._db)\n File \"\u2026/Vcs/django/django/contrib/auth/base_user.py\", line 66, in save\n\tsuper().save(*args, **kwargs)\n File 
\"\u2026/Vcs/django/django/db/models/base.py\", line 741, in save\n\tforce_update=force_update, update_fields=update_fields)\n File \"\u2026/Vcs/django/django/db/models/base.py\", line 779, in save_base\n\tforce_update, using, update_fields,\n File \"\u2026/Vcs/django/django/db/models/base.py\", line 870, in _save_table\n\tresult = self._do_insert(cls._base_manager, using, fields, update_pk, raw)\n File \"\u2026/Vcs/django/django/db/models/base.py\", line 908, in _do_insert\n\tusing=using, raw=raw)\n File \"\u2026/Vcs/django/django/db/models/manager.py\", line 82, in manager_method\n\treturn getattr(self.get_queryset(), name)(*args, **kwargs)\n File \"\u2026/Vcs/django/django/db/models/query.py\", line 1175, in _insert\n\treturn query.get_compiler(using=using).execute_sql(return_id)\n File \"\u2026/Vcs/django/django/db/models/sql/compiler.py\", line 1321, in execute_sql\n\tcursor.execute(sql, params)\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 67, in execute\n\treturn self._execute_with_wrappers(sql, params, many=False, executor=self._execute)\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 76, in _execute_with_wrappers\n\treturn executor(sql, params, many, context)\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 84, in _execute\n\treturn self.cursor.execute(sql, params)\n File \"\u2026/Vcs/django/django/db/utils.py\", line 89, in __exit__\n\traise dj_exc_value.with_traceback(traceback) from exc_value\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 84, in _execute\n\treturn self.cursor.execute(sql, params)\n File \"\u2026/Vcs/django/django/db/backends/sqlite3/base.py\", line 391, in execute\n\treturn Database.Cursor.execute(self, query, params)\ndjango.db.utils.OperationalError: database is locked\n", - "golden_patch": "diff --git a/django/db/backends/sqlite3/creation.py b/django/db/backends/sqlite3/creation.py\n--- a/django/db/backends/sqlite3/creation.py\n+++ b/django/db/backends/sqlite3/creation.py\n@@ -98,4 +98,6 @@ def test_db_signature(self):\n sig = [self.connection.settings_dict['NAME']]\n if self.is_in_memory_db(test_database_name):\n sig.append(self.connection.alias)\n+ else:\n+ sig.append(test_database_name)\n return tuple(sig)\n", - "expected_spans": { - "django/db/backends/sqlite3/creation.py": [ - "DatabaseCreation", - "DatabaseCreation.test_db_signature" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "django__django-12125", - "repo": "django/django", - "base_commit": "89d41cba392b759732ba9f1db4ff29ed47da6a56", - "problem_statement": "makemigrations produces incorrect path for inner classes\nDescription\n\t\nWhen you define a subclass from django.db.models.Field as an inner class of some other class, and use this field inside a django.db.models.Model class, then when you run manage.py makemigrations, a migrations file is created which refers to the inner class as if it were a top-level class of the module it is in.\nTo reproduce, create the following as your model:\nclass Outer(object):\n\tclass Inner(models.CharField):\n\t\tpass\nclass A(models.Model):\n\tfield = Outer.Inner(max_length=20)\nAfter running manage.py makemigrations, the generated migrations file contains the following:\nmigrations.CreateModel(\n\tname='A',\n\tfields=[\n\t\t('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n\t\t('field', test1.models.Inner(max_length=20)),\n\t],\n),\nNote the test1.models.Inner, which should have been test1.models.Outer.Inner.\nThe 
real life case involved an EnumField from django-enumfields, defined as an inner class of a Django Model class, similar to this:\nimport enum\nfrom enumfields import Enum, EnumField\nclass Thing(models.Model):\n\t@enum.unique\n\tclass State(Enum):\n\t\ton = 'on'\n\t\toff = 'off'\n\tstate = EnumField(enum=State)\nThis results in the following migrations code:\nmigrations.CreateModel(\n\tname='Thing',\n\tfields=[\n\t\t('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n\t\t('state', enumfields.fields.EnumField(enum=test1.models.State, max_length=10)),\n\t],\n),\nThis refers to test1.models.State, instead of to test1.models.Thing.State.\n", - "golden_patch": "diff --git a/django/db/migrations/serializer.py b/django/db/migrations/serializer.py\n--- a/django/db/migrations/serializer.py\n+++ b/django/db/migrations/serializer.py\n@@ -269,7 +269,7 @@ def serialize(self):\n if module == builtins.__name__:\n return self.value.__name__, set()\n else:\n- return \"%s.%s\" % (module, self.value.__name__), {\"import %s\" % module}\n+ return \"%s.%s\" % (module, self.value.__qualname__), {\"import %s\" % module}\n \n \n class UUIDSerializer(BaseSerializer):\n", - "expected_spans": { - "django/db/migrations/serializer.py": [ - "TypeSerializer", - "TypeSerializer.serialize" - ] - }, - "resolved_by": [ - { - "name": "20240523_aider", - "patch": "diff --git a/django/db/migrations/writer.py b/django/db/migrations/writer.py\nindex 4918261fb0..8bfe392a86 100644\n--- a/django/db/migrations/writer.py\n+++ b/django/db/migrations/writer.py\n@@ -268,7 +268,17 @@ class MigrationWriter:\n \n @classmethod\n def serialize(cls, value):\n- return serializer_factory(value).serialize()\n+ serialized_value, imports = serializer_factory(value).serialize()\n+ if isinstance(value, type) and value.__module__ != 'builtins':\n+ module = value.__module__\n+ qualname = value.__qualname__\n+ if '.' in qualname:\n+ # Handle inner classes\n+ parts = qualname.split('.')\n+ module += '.' 
+ '.'.join(parts[:-1])\n+ qualname = parts[-1]\n+ serialized_value = f'{module}.{qualname}'\n+ return serialized_value, imports\n \n @classmethod\n def register_serializer(cls, type_, serializer):\n" + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } }, { - "name": "20240612_IBM_Research_Agent101", - "patch": "\ndiff --git a/django/db/migrations/serializer.py b/django/db/migrations/serializer.py\nindex 7d226955d5..f4475645de 100644\n--- a/django/db/migrations/serializer.py\n+++ b/django/db/migrations/serializer.py\n@@ -269,7 +269,26 @@ class TypeSerializer(BaseSerializer):\n if module == builtins.__name__:\n return self.value.__name__, set()\n else:\n+ # Check if the type is an inner class or a deconstructible class\n+ if hasattr(self.value, '__qualname__'):\n+ qualname_parts = self.value.__qualname__.split('.')\n+ if len(qualname_parts) > 1:\n+ # Serialize inner class with the containing class's name as a prefix\n+ path = \"%s.%s\" % (module, self.value.__qualname__)\n+ # Check if the class has a deconstruct method\n+ if hasattr(self.value, 'deconstruct'):\n+ # Create an instance to call deconstruct\n+ instance = self.value()\n+ # Ensure deconstructible classes are serialized with their full path\n+ path = instance.deconstruct()[1]\n+ return path, {\"import %s\" % module}\n return \"%s.%s\" % (module, self.value.__name__), {\"import %s\" % module}\n+ return \"%s.%s\" % (module, self.value.__name__), {\"import %s\" % module}\n+ if hasattr(self.value, '__qualname__') and '.' in self.value.__qualname__:\n+ # Serialize inner class with the containing class's name as a prefix\n+ return \"%s.%s\" % (module, self.value.__qualname__), {\"import %s\" % module}\n+ else:\n+ return \"%s.%s\" % (module, self.value.__name__), {\"import %s\" % module}\n \n \n class UUIDSerializer(BaseSerializer):\n" - } - ], - "alternative_spans": [] - }, - { - "instance_id": "django__django-12184", - "repo": "django/django", - "base_commit": "5d674eac871a306405b0fbbaeb17bbeba9c68bf3", - "problem_statement": "Optional URL params crash some view functions.\nDescription\n\t\nMy use case, running fine with Django until 2.2:\nURLConf:\nurlpatterns += [\n\t...\n\tre_path(r'^module/(?P(html|json|xml))?/?$', views.modules, name='modules'),\n]\nView:\ndef modules(request, format='html'):\n\t...\n\treturn render(...)\nWith Django 3.0, this is now producing an error:\nTraceback (most recent call last):\n File \"/l10n/venv/lib/python3.6/site-packages/django/core/handlers/exception.py\", line 34, in inner\n\tresponse = get_response(request)\n File \"/l10n/venv/lib/python3.6/site-packages/django/core/handlers/base.py\", line 115, in _get_response\n\tresponse = self.process_exception_by_middleware(e, request)\n File \"/l10n/venv/lib/python3.6/site-packages/django/core/handlers/base.py\", line 113, in _get_response\n\tresponse = wrapped_callback(request, *callback_args, **callback_kwargs)\nException Type: TypeError at /module/\nException Value: modules() takes from 1 to 2 positional arguments but 3 were given\n", - "golden_patch": "diff --git a/django/urls/resolvers.py b/django/urls/resolvers.py\n--- a/django/urls/resolvers.py\n+++ b/django/urls/resolvers.py\n@@ -158,8 +158,9 @@ def match(self, path):\n # If there are any named groups, use those as kwargs, ignoring\n # non-named groups. 
Otherwise, pass all non-named arguments as\n # positional arguments.\n- kwargs = {k: v for k, v in match.groupdict().items() if v is not None}\n+ kwargs = match.groupdict()\n args = () if kwargs else match.groups()\n+ kwargs = {k: v for k, v in kwargs.items() if v is not None}\n return path[match.end():], args, kwargs\n return None\n \n", - "expected_spans": { - "django/urls/resolvers.py": [ - "RegexPattern.match" - ] - }, - "resolved_by": [], - "alternative_spans": [] - }, - { - "instance_id": "sympy__sympy-18057", - "repo": "sympy/sympy", - "base_commit": "62000f37b8821573ba00280524ffb4ac4a380875", - "problem_statement": "Sympy incorrectly attempts to eval reprs in its __eq__ method\nPassing strings produced by unknown objects into eval is **very bad**. It is especially surprising for an equality check to trigger that kind of behavior. This should be fixed ASAP.\r\n\r\nRepro code:\r\n\r\n```\r\nimport sympy\r\nclass C:\r\n def __repr__(self):\r\n return 'x.y'\r\n_ = sympy.Symbol('x') == C()\r\n```\r\n\r\nResults in:\r\n\r\n```\r\nE AttributeError: 'Symbol' object has no attribute 'y'\r\n```\r\n\r\nOn the line:\r\n\r\n```\r\n expr = eval(\r\n code, global_dict, local_dict) # take local objects in preference\r\n```\r\n\r\nWhere code is:\r\n\r\n```\r\nSymbol ('x' ).y\r\n```\r\n\r\nFull trace:\r\n\r\n```\r\nFAILED [100%]\r\n class C:\r\n def __repr__(self):\r\n return 'x.y'\r\n \r\n> _ = sympy.Symbol('x') == C()\r\n\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\nsympy/core/expr.py:124: in __eq__\r\n other = sympify(other)\r\nsympy/core/sympify.py:385: in sympify\r\n expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)\r\nsympy/parsing/sympy_parser.py:1011: in parse_expr\r\n return eval_expr(code, local_dict, global_dict)\r\nsympy/parsing/sympy_parser.py:906: in eval_expr\r\n code, global_dict, local_dict) # take local objects in preference\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\n> ???\r\nE AttributeError: 'Symbol' object has no attribute 'y'\r\n\r\n:1: AttributeError\r\n```\r\n\r\nRelated issue: an unknown object whose repr is `x` will incorrectly compare as equal to a sympy symbol x:\r\n\r\n```\r\n class C:\r\n def __repr__(self):\r\n return 'x'\r\n\r\n assert sympy.Symbol('x') != C() # fails\r\n```\n", - "golden_patch": "diff --git a/sympy/core/expr.py b/sympy/core/expr.py\n--- a/sympy/core/expr.py\n+++ b/sympy/core/expr.py\n@@ -121,7 +121,7 @@ def _hashable_content(self):\n \n def __eq__(self, other):\n try:\n- other = sympify(other)\n+ other = _sympify(other)\n if not isinstance(other, Expr):\n return False\n except (SympifyError, SyntaxError):\n", - "expected_spans": { - "sympy/core/expr.py": [ - "Expr.__eq__" - ] - }, - "resolved_by": [ + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, { - "name": "20240524_opencsg_starship_gpt4", - "patch": "--- a/sympy/core/expr.py\n+++ b/sympy/core/expr.py\n@@ -120,6 +120,9 @@\n return self._args\n \n def __eq__(self, other):\n+ from .basic import Basic\n+ if not isinstance(other, Basic):\n+ return NotImplemented\n try:\n other = sympify(other)\n if not isinstance(other, Expr):\n" + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + 
"alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } }, { - "name": "20240527_marscode-agent-dev", - "patch": "diff --git a/sympy/core/expr.py b/sympy/core/expr.py\nindex 24d89897f7..b1e85ead5f 100644\n--- a/sympy/core/expr.py\n+++ b/sympy/core/expr.py\n@@ -121,7 +121,7 @@ def _hashable_content(self):\n \n def __eq__(self, other):\n try:\n- other = sympify(other)\n+ other = _sympify(other)\n if not isinstance(other, Expr):\n return False\n except (SympifyError, SyntaxError):\n" + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } }, { - "name": "20240609_moatless_gpt4o", - "patch": "diff --git a/sympy/core/expr.py b/sympy/core/expr.py\nindex 24d89897f7..3be2795f46 100644\n--- a/sympy/core/expr.py\n+++ b/sympy/core/expr.py\n@@ -120,26 +120,19 @@ def _hashable_content(self):\n return self._args\n \n def __eq__(self, other):\n- try:\n- other = sympify(other)\n- if not isinstance(other, Expr):\n+ if isinstance(other, Expr):\n+ if not (self.is_Number and other.is_Number) and (type(self) != type(other)):\n return False\n- except (SympifyError, SyntaxError):\n- return False\n- # check for pure number expr\n- if not (self.is_Number and other.is_Number) and (\n- type(self) != type(other)):\n- return False\n- a, b = self._hashable_content(), other._hashable_content()\n- if a != b:\n- return False\n- # check number *in* an expression\n- for a, b in zip(a, b):\n- if not isinstance(a, Expr):\n- continue\n- if a.is_Number and type(a) != type(b):\n+ a, b = self._hashable_content(), other._hashable_content()\n+ if a != b:\n return False\n- return True\n+ for a, b in zip(a, b):\n+ if not isinstance(a, Expr):\n+ continue\n+ if a.is_Number and type(a) != type(b):\n+ return False\n+ return True\n+ return repr(self) == repr(other)\n \n # ***************\n # * Arithmetics *\n" - } + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": 
"20240402_sweagent_gpt4", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ], + "sympy/matrices/sparse.py": [ + "SparseMatrix._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ], + "sympy/matrices/tests/test_matrices.py": [ + "test_col_insert" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ], + "sympy/matrices/tests/test_matrices.py": [ + "test_deprecated" + ] + }, + "alternative_spans": { + "sympy/matrices/common.py": [ + "MatrixShaping._eval_col_insert" + ] + } + } ], "alternative_spans": [] }, { - "instance_id": "sympy__sympy-18087", - "repo": "sympy/sympy", - "base_commit": "9da013ad0ddc3cd96fe505f2e47c63e372040916", - "problem_statement": "Simplify of simple trig expression fails\ntrigsimp in various versions, including 1.5, incorrectly simplifies cos(x)+sqrt(sin(x)**2) as though it were cos(x)+sin(x) for general complex x. 
(Oddly it gets this right if x is real.)\r\n\r\nEmbarrassingly I found this by accident while writing sympy-based teaching material...\r\n\n", - "golden_patch": "diff --git a/sympy/core/exprtools.py b/sympy/core/exprtools.py\n--- a/sympy/core/exprtools.py\n+++ b/sympy/core/exprtools.py\n@@ -358,8 +358,8 @@ def __init__(self, factors=None): # Factors\n for f in list(factors.keys()):\n if isinstance(f, Rational) and not isinstance(f, Integer):\n p, q = Integer(f.p), Integer(f.q)\n- factors[p] = (factors[p] if p in factors else 0) + factors[f]\n- factors[q] = (factors[q] if q in factors else 0) - factors[f]\n+ factors[p] = (factors[p] if p in factors else S.Zero) + factors[f]\n+ factors[q] = (factors[q] if q in factors else S.Zero) - factors[f]\n factors.pop(f)\n if i:\n factors[I] = S.One*i\n@@ -448,14 +448,12 @@ def as_expr(self): # Factors\n args = []\n for factor, exp in self.factors.items():\n if exp != 1:\n- b, e = factor.as_base_exp()\n- if isinstance(exp, int):\n- e = _keep_coeff(Integer(exp), e)\n- elif isinstance(exp, Rational):\n+ if isinstance(exp, Integer):\n+ b, e = factor.as_base_exp()\n e = _keep_coeff(exp, e)\n+ args.append(b**e)\n else:\n- e *= exp\n- args.append(b**e)\n+ args.append(factor**exp)\n else:\n args.append(factor)\n return Mul(*args)\n", + "instance_id": "astropy__astropy-6938", + "repo": "astropy/astropy", + "base_commit": "c76af9ed6bb89bfba45b9f5bc1e635188278e2fa", + "problem_statement": "Possible bug in io.fits related to D exponents\nI came across the following code in ``fitsrec.py``:\r\n\r\n```python\r\n # Replace exponent separator in floating point numbers\r\n if 'D' in format:\r\n output_field.replace(encode_ascii('E'), encode_ascii('D'))\r\n```\r\n\r\nI think this may be incorrect because as far as I can tell ``replace`` is not an in-place operation for ``chararray`` (it returns a copy). 
Commenting out this code doesn't cause any tests to fail so I think this code isn't being tested anyway.\n", + "golden_patch": "diff --git a/astropy/io/fits/fitsrec.py b/astropy/io/fits/fitsrec.py\n--- a/astropy/io/fits/fitsrec.py\n+++ b/astropy/io/fits/fitsrec.py\n@@ -1261,7 +1261,7 @@ def _scale_back_ascii(self, col_idx, input_field, output_field):\n \n # Replace exponent separator in floating point numbers\n if 'D' in format:\n- output_field.replace(encode_ascii('E'), encode_ascii('D'))\n+ output_field[:] = output_field.replace(b'E', b'D')\n \n \n def _get_recarray_field(array, key):\n", + "test_patch": "diff --git a/astropy/io/fits/tests/test_checksum.py b/astropy/io/fits/tests/test_checksum.py\n--- a/astropy/io/fits/tests/test_checksum.py\n+++ b/astropy/io/fits/tests/test_checksum.py\n@@ -205,9 +205,9 @@ def test_ascii_table_data(self):\n # The checksum ends up being different on Windows, possibly due\n # to slight floating point differences\n assert 'CHECKSUM' in hdul[1].header\n- assert hdul[1].header['CHECKSUM'] == '51IDA1G981GCA1G9'\n+ assert hdul[1].header['CHECKSUM'] == '3rKFAoI94oICAoI9'\n assert 'DATASUM' in hdul[1].header\n- assert hdul[1].header['DATASUM'] == '1948208413'\n+ assert hdul[1].header['DATASUM'] == '1914653725'\n \n def test_compressed_image_data(self):\n with fits.open(self.data('comp.fits')) as h1:\ndiff --git a/astropy/io/fits/tests/test_table.py b/astropy/io/fits/tests/test_table.py\n--- a/astropy/io/fits/tests/test_table.py\n+++ b/astropy/io/fits/tests/test_table.py\n@@ -298,6 +298,19 @@ def test_ascii_table(self):\n hdul = fits.open(self.temp('toto.fits'))\n assert comparerecords(hdu.data, hdul[1].data)\n hdul.close()\n+\n+ # Test Scaling\n+\n+ r1 = np.array([11., 12.])\n+ c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,\n+ bzero=0.6)\n+ hdu = fits.TableHDU.from_columns([c2])\n+ hdu.writeto(self.temp('toto.fits'), overwrite=True)\n+ with open(self.temp('toto.fits')) as f:\n+ assert '4.95652173913043548D+00' in f.read()\n+ with fits.open(self.temp('toto.fits')) as hdul:\n+ assert comparerecords(hdu.data, hdul[1].data)\n+\n a.close()\n \n def test_endianness(self):\n", + "fail_to_pass": "[\"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_ascii_table_data\", \"astropy/io/fits/tests/test_table.py::TestTableFunctions::test_ascii_table\"]", + "pass_to_pass": "[\"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_sample_file\", \"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_image_create\", \"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_scaled_data\", \"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_scaled_data_auto_rescale\", \"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_uint16_data\", \"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_groups_hdu_data\", \"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_open_with_no_keywords\", \"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_writeto_convenience\", \"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_hdu_writeto\", \"astropy/io/fits/tests/test_checksum.py::TestChecksumFunctions::test_datasum_only\", \"astropy/io/fits/tests/test_table.py::test_regression_scalar_indexing\"]", "expected_spans": { - "sympy/core/exprtools.py": [ - "Factors", - "Factors.as_expr" + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" ] }, - "resolved_by": [], - "alternative_spans": [] + 
"test_file_spans": { + "astropy/io/fits/tests/test_checksum.py": [ + "TestChecksumFunctions.test_ascii_table_data" + ], + "astropy/io/fits/tests/test_table.py": [ + "TestTableFunctions.test_ascii_table" + ] + }, + "resolved_by": [ + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + }, + "alternative_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + }, + "alternative_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + }, + "alternative_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec._scale_back_ascii" + ] + }, + "alternative_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec._scale_back_ascii" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + }, + "alternative_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + }, + "alternative_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + }, + "alternative_spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec", + "FITS_rec._scale_back_ascii" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240908_infant_gpt4o", + "spans": { + "astropy/io/fits/fitsrec.py": [ + "FITS_rec._scale_back_ascii" + ] + } + } + ] }, { - "instance_id": "sympy__sympy-18189", - "repo": "sympy/sympy", - "base_commit": "1923822ddf8265199dbd9ef9ce09641d3fd042b9", - "problem_statement": "diophantine: incomplete results depending on syms order with permute=True\n```\r\nIn [10]: diophantine(n**4 + m**4 - 2**4 - 3**4, syms=(m,n), permute=True)\r\nOut[10]: {(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}\r\n\r\nIn [11]: diophantine(n**4 + m**4 - 2**4 - 3**4, syms=(n,m), permute=True)\r\nOut[11]: {(3, 2)}\r\n```\r\n\ndiophantine: incomplete results depending on syms order with permute=True\n```\r\nIn [10]: diophantine(n**4 + m**4 - 2**4 - 3**4, syms=(m,n), permute=True)\r\nOut[10]: {(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}\r\n\r\nIn [11]: diophantine(n**4 + m**4 - 2**4 - 3**4, syms=(n,m), permute=True)\r\nOut[11]: {(3, 2)}\r\n```\r\n\n", - "golden_patch": "diff --git a/sympy/solvers/diophantine.py b/sympy/solvers/diophantine.py\n--- a/sympy/solvers/diophantine.py\n+++ b/sympy/solvers/diophantine.py\n@@ -182,7 +182,7 @@ def diophantine(eq, param=symbols(\"t\", integer=True), syms=None,\n if syms != var:\n dict_sym_index = dict(zip(syms, range(len(syms))))\n return {tuple([t[dict_sym_index[i]] for i in var])\n- for t in diophantine(eq, param)}\n+ for t in 
diophantine(eq, param, permute=permute)}\n n, d = eq.as_numer_denom()\n if n.is_number:\n return set()\n", + "instance_id": "scikit-learn__scikit-learn-10297", + "repo": "scikit-learn/scikit-learn", + "base_commit": "b90661d6a46aa3619d3eec94d5281f5888add501", + "problem_statement": "linear_model.RidgeClassifierCV's Parameter store_cv_values issue\n#### Description\r\nParameter store_cv_values error on sklearn.linear_model.RidgeClassifierCV\r\n\r\n#### Steps/Code to Reproduce\r\nimport numpy as np\r\nfrom sklearn import linear_model as lm\r\n\r\n#test database\r\nn = 100\r\nx = np.random.randn(n, 30)\r\ny = np.random.normal(size = n)\r\n\r\nrr = lm.RidgeClassifierCV(alphas = np.arange(0.1, 1000, 0.1), normalize = True, \r\n store_cv_values = True).fit(x, y)\r\n\r\n#### Expected Results\r\nExpected to get the usual ridge regression model output, keeping the cross validation predictions as attribute.\r\n\r\n#### Actual Results\r\nTypeError: __init__() got an unexpected keyword argument 'store_cv_values'\r\n\r\nlm.RidgeClassifierCV actually has no parameter store_cv_values, even though some attributes depends on it.\r\n\r\n#### Versions\r\nWindows-10-10.0.14393-SP0\r\nPython 3.6.3 |Anaconda, Inc.| (default, Oct 15 2017, 03:27:45) [MSC v.1900 64 bit (AMD64)]\r\nNumPy 1.13.3\r\nSciPy 0.19.1\r\nScikit-Learn 0.19.1\r\n\r\n\nAdd store_cv_values boolean flag support to RidgeClassifierCV\nAdd store_cv_values support to RidgeClassifierCV - documentation claims that usage of this flag is possible:\n\n> cv_values_ : array, shape = [n_samples, n_alphas] or shape = [n_samples, n_responses, n_alphas], optional\n> Cross-validation values for each alpha (if **store_cv_values**=True and `cv=None`).\n\nWhile actually usage of this flag gives \n\n> TypeError: **init**() got an unexpected keyword argument 'store_cv_values'\n\n", + "golden_patch": "diff --git a/sklearn/linear_model/ridge.py b/sklearn/linear_model/ridge.py\n--- a/sklearn/linear_model/ridge.py\n+++ b/sklearn/linear_model/ridge.py\n@@ -1212,18 +1212,18 @@ class RidgeCV(_BaseRidgeCV, RegressorMixin):\n \n store_cv_values : boolean, default=False\n Flag indicating if the cross-validation values corresponding to\n- each alpha should be stored in the `cv_values_` attribute (see\n- below). This flag is only compatible with `cv=None` (i.e. using\n+ each alpha should be stored in the ``cv_values_`` attribute (see\n+ below). This flag is only compatible with ``cv=None`` (i.e. using\n Generalized Cross-Validation).\n \n Attributes\n ----------\n cv_values_ : array, shape = [n_samples, n_alphas] or \\\n shape = [n_samples, n_targets, n_alphas], optional\n- Cross-validation values for each alpha (if `store_cv_values=True` and \\\n- `cv=None`). After `fit()` has been called, this attribute will \\\n- contain the mean squared errors (by default) or the values of the \\\n- `{loss,score}_func` function (if provided in the constructor).\n+ Cross-validation values for each alpha (if ``store_cv_values=True``\\\n+ and ``cv=None``). 
After ``fit()`` has been called, this attribute \\\n+ will contain the mean squared errors (by default) or the values \\\n+ of the ``{loss,score}_func`` function (if provided in the constructor).\n \n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n@@ -1301,14 +1301,19 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n weights inversely proportional to class frequencies in the input data\n as ``n_samples / (n_classes * np.bincount(y))``\n \n+ store_cv_values : boolean, default=False\n+ Flag indicating if the cross-validation values corresponding to\n+ each alpha should be stored in the ``cv_values_`` attribute (see\n+ below). This flag is only compatible with ``cv=None`` (i.e. using\n+ Generalized Cross-Validation).\n+\n Attributes\n ----------\n- cv_values_ : array, shape = [n_samples, n_alphas] or \\\n- shape = [n_samples, n_responses, n_alphas], optional\n- Cross-validation values for each alpha (if `store_cv_values=True` and\n- `cv=None`). After `fit()` has been called, this attribute will contain \\\n- the mean squared errors (by default) or the values of the \\\n- `{loss,score}_func` function (if provided in the constructor).\n+ cv_values_ : array, shape = [n_samples, n_targets, n_alphas], optional\n+ Cross-validation values for each alpha (if ``store_cv_values=True`` and\n+ ``cv=None``). After ``fit()`` has been called, this attribute will\n+ contain the mean squared errors (by default) or the values of the\n+ ``{loss,score}_func`` function (if provided in the constructor).\n \n coef_ : array, shape = [n_features] or [n_targets, n_features]\n Weight vector(s).\n@@ -1333,10 +1338,11 @@ class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):\n advantage of the multi-variate response support in Ridge.\n \"\"\"\n def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,\n- normalize=False, scoring=None, cv=None, class_weight=None):\n+ normalize=False, scoring=None, cv=None, class_weight=None,\n+ store_cv_values=False):\n super(RidgeClassifierCV, self).__init__(\n alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,\n- scoring=scoring, cv=cv)\n+ scoring=scoring, cv=cv, store_cv_values=store_cv_values)\n self.class_weight = class_weight\n \n def fit(self, X, y, sample_weight=None):\n", + "test_patch": "diff --git a/sklearn/linear_model/tests/test_ridge.py b/sklearn/linear_model/tests/test_ridge.py\n--- a/sklearn/linear_model/tests/test_ridge.py\n+++ b/sklearn/linear_model/tests/test_ridge.py\n@@ -575,8 +575,7 @@ def test_class_weights_cv():\n \n \n def test_ridgecv_store_cv_values():\n- # Test _RidgeCV's store_cv_values attribute.\n- rng = rng = np.random.RandomState(42)\n+ rng = np.random.RandomState(42)\n \n n_samples = 8\n n_features = 5\n@@ -589,13 +588,38 @@ def test_ridgecv_store_cv_values():\n # with len(y.shape) == 1\n y = rng.randn(n_samples)\n r.fit(x, y)\n- assert_equal(r.cv_values_.shape, (n_samples, n_alphas))\n+ assert r.cv_values_.shape == (n_samples, n_alphas)\n+\n+ # with len(y.shape) == 2\n+ n_targets = 3\n+ y = rng.randn(n_samples, n_targets)\n+ r.fit(x, y)\n+ assert r.cv_values_.shape == (n_samples, n_targets, n_alphas)\n+\n+\n+def test_ridge_classifier_cv_store_cv_values():\n+ x = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],\n+ [1.0, 1.0], [1.0, 0.0]])\n+ y = np.array([1, 1, 1, -1, -1])\n+\n+ n_samples = x.shape[0]\n+ alphas = [1e-1, 1e0, 1e1]\n+ n_alphas = len(alphas)\n+\n+ r = RidgeClassifierCV(alphas=alphas, store_cv_values=True)\n+\n+ # with len(y.shape) == 1\n+ n_targets = 1\n+ 
r.fit(x, y)\n+ assert r.cv_values_.shape == (n_samples, n_targets, n_alphas)\n \n # with len(y.shape) == 2\n- n_responses = 3\n- y = rng.randn(n_samples, n_responses)\n+ y = np.array([[1, 1, 1, -1, -1],\n+ [1, -1, 1, -1, 1],\n+ [-1, -1, 1, -1, -1]]).transpose()\n+ n_targets = y.shape[1]\n r.fit(x, y)\n- assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))\n+ assert r.cv_values_.shape == (n_samples, n_targets, n_alphas)\n \n \n def test_ridgecv_sample_weight():\n@@ -618,7 +642,7 @@ def test_ridgecv_sample_weight():\n gs = GridSearchCV(Ridge(), parameters, cv=cv)\n gs.fit(X, y, sample_weight=sample_weight)\n \n- assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)\n+ assert ridgecv.alpha_ == gs.best_estimator_.alpha\n assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)\n \n \n", + "fail_to_pass": "[\"sklearn/linear_model/tests/test_ridge.py::test_ridge_classifier_cv_store_cv_values\"]", + "pass_to_pass": "[\"sklearn/linear_model/tests/test_ridge.py::test_ridge\", \"sklearn/linear_model/tests/test_ridge.py::test_primal_dual_relationship\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_singular\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_regression_sample_weights\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_sample_weights\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_shapes\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_intercept\", \"sklearn/linear_model/tests/test_ridge.py::test_toy_ridge_object\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_vs_lstsq\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_individual_penalties\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_cv_sparse_svd\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_sparse_svd\", \"sklearn/linear_model/tests/test_ridge.py::test_class_weights\", \"sklearn/linear_model/tests/test_ridge.py::test_class_weight_vs_sample_weight\", \"sklearn/linear_model/tests/test_ridge.py::test_class_weights_cv\", \"sklearn/linear_model/tests/test_ridge.py::test_ridgecv_store_cv_values\", \"sklearn/linear_model/tests/test_ridge.py::test_ridgecv_sample_weight\", \"sklearn/linear_model/tests/test_ridge.py::test_raises_value_error_if_sample_weights_greater_than_1d\", \"sklearn/linear_model/tests/test_ridge.py::test_sparse_design_with_sample_weights\", \"sklearn/linear_model/tests/test_ridge.py::test_raises_value_error_if_solver_not_supported\", \"sklearn/linear_model/tests/test_ridge.py::test_sparse_cg_max_iter\", \"sklearn/linear_model/tests/test_ridge.py::test_n_iter\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_fit_intercept_sparse\", \"sklearn/linear_model/tests/test_ridge.py::test_errors_and_values_helper\", \"sklearn/linear_model/tests/test_ridge.py::test_errors_and_values_svd_helper\", \"sklearn/linear_model/tests/test_ridge.py::test_ridge_classifier_no_support_multilabel\", \"sklearn/linear_model/tests/test_ridge.py::test_dtype_match\", \"sklearn/linear_model/tests/test_ridge.py::test_dtype_match_cholesky\"]", "expected_spans": { - "sympy/solvers/diophantine.py": [ - "diophantine" + "sklearn/linear_model/ridge.py": [ + "RidgeCV", + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + }, + "test_file_spans": { + "sklearn/linear_model/tests/test_ridge.py": [ + "test_ridgecv_store_cv_values", + "test_ridgecv_sample_weight" ] }, "resolved_by": [ { - "name": "20240604_CodeR", - "patch": "\ndiff --git a/sympy/solvers/diophantine.py b/sympy/solvers/diophantine.py\nindex 6092e35dab..b43f5c1dcf 100644\n--- 
a/sympy/solvers/diophantine.py\n+++ b/sympy/solvers/diophantine.py\n@@ -182,7 +182,7 @@ def diophantine(eq, param=symbols(\"t\", integer=True), syms=None,\n if syms != var:\n dict_sym_index = dict(zip(syms, range(len(syms))))\n return {tuple([t[dict_sym_index[i]] for i in var])\n- for t in diophantine(eq, param)}\n+ for t in diophantine(eq, param, permute=permute)}\n n, d = eq.as_numer_denom()\n if n.is_number:\n return set()\n" + "name": "20240523_aider", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV", + "RidgeClassifierCV.classes_" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV", + "RidgeClassifierCV.classes_" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "_BaseRidgeCV.fit", + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "_BaseRidgeCV.fit", + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "docstring", + "imports", + "_solve_sparse_cg", + "_solve_lsqr", + "_solve_cholesky", + "_solve_cholesky_kernel", + "_solve_svd", + "ridge_regression", + "_BaseRidge.__init__", + "_BaseRidge.fit", + "Ridge.__init__", + "RidgeClassifier.__init__", + "RidgeClassifier.fit", + "_RidgeGCV.__init__", + "_RidgeGCV._pre_compute", + "_RidgeGCV._decomp_diag", + "_RidgeGCV._diag_dot", + 
"_RidgeGCV._errors_and_values_helper", + "_RidgeGCV._values", + "_RidgeGCV._pre_compute_svd", + "_RidgeGCV._errors_and_values_svd_helper", + "_RidgeGCV._values_svd", + "_RidgeGCV.fit", + "_BaseRidgeCV.__init__", + "_BaseRidgeCV.fit", + "RidgeCV", + "RidgeClassifierCV", + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "docstring", + "imports", + "_solve_sparse_cg", + "_solve_lsqr", + "_solve_cholesky", + "_solve_cholesky_kernel", + "_solve_svd", + "ridge_regression", + "_BaseRidge.__init__", + "_BaseRidge.fit", + "Ridge.__init__", + "RidgeClassifier.__init__", + "RidgeClassifier.fit", + "_RidgeGCV.__init__", + "_RidgeGCV._pre_compute", + "_RidgeGCV._decomp_diag", + "_RidgeGCV._diag_dot", + "_RidgeGCV._errors_and_values_helper", + "_RidgeGCV._values", + "_RidgeGCV._pre_compute_svd", + "_RidgeGCV._errors_and_values_svd_helper", + "_RidgeGCV._values_svd", + "_RidgeGCV.fit", + "_BaseRidgeCV.__init__", + "_BaseRidgeCV.fit", + "RidgeCV", + "RidgeClassifierCV", + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV", + "RidgeClassifierCV.classes_" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV", + "RidgeClassifierCV.classes_" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + 
"sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ], + "sklearn/linear_model/tests/test_ridge.py": [ + "test_ridgecv_store_cv_values" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240630_agentless_gpt4o", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV", + "RidgeClassifierCV.classes_" + ] + } + }, + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240828_autose_mixed", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "run_name": "20240615_appmap-navie_gpt4o", + "spans": { + "sklearn/linear_model/ridge.py": [ + "_BaseRidgeCV.fit", + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240530_autocoderover-v20240408", + "spans": { + "sklearn/linear_model/ridge.py": 
[ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240706_sima_gpt4o", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "run_name": "20240623_moatless_claude35sonnet", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240820_honeycomb", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240808_RepoGraph_gpt4o", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV", + "RidgeClassifierCV.classes_" + ] + } + }, + { + "run_name": "20240908_infant_gpt4o", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240402_sweagent_gpt4", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + }, + { + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV", + "RidgeClassifierCV.__init__" + ] + } + }, + { + "run_name": "20240721_amazon-q-developer-agent-20240719-dev", + "spans": { + "sklearn/linear_model/ridge.py": [ + "RidgeClassifierCV.__init__", + "RidgeClassifierCV.fit" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-13773", + "repo": "sympy/sympy", + "base_commit": "7121bdf1facdd90d05b6994b4c2e5b2865a4638a", + "problem_statement": "@ (__matmul__) should fail if one argument is not a matrix\n```\r\n>>> A = Matrix([[1, 2], 
[3, 4]])\r\n>>> B = Matrix([[2, 3], [1, 2]])\r\n>>> A@B\r\nMatrix([\r\n[ 4, 7],\r\n[10, 17]])\r\n>>> 2@B\r\nMatrix([\r\n[4, 6],\r\n[2, 4]])\r\n```\r\n\r\nRight now `@` (`__matmul__`) just copies `__mul__`, but it should actually only work if the multiplication is actually a matrix multiplication. \r\n\r\nThis is also how NumPy works\r\n\r\n```\r\n>>> import numpy as np\r\n>>> a = np.array([[1, 2], [3, 4]])\r\n>>> 2*a\r\narray([[2, 4],\r\n [6, 8]])\r\n>>> 2@a\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\nValueError: Scalar operands are not allowed, use '*' instead\r\n```\n", + "golden_patch": "diff --git a/sympy/matrices/common.py b/sympy/matrices/common.py\n--- a/sympy/matrices/common.py\n+++ b/sympy/matrices/common.py\n@@ -1973,6 +1973,10 @@ def __div__(self, other):\n \n @call_highest_priority('__rmatmul__')\n def __matmul__(self, other):\n+ other = _matrixify(other)\n+ if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):\n+ return NotImplemented\n+\n return self.__mul__(other)\n \n @call_highest_priority('__rmul__')\n@@ -2066,6 +2070,10 @@ def __radd__(self, other):\n \n @call_highest_priority('__matmul__')\n def __rmatmul__(self, other):\n+ other = _matrixify(other)\n+ if not getattr(other, 'is_Matrix', False) and not getattr(other, 'is_MatrixLike', False):\n+ return NotImplemented\n+\n return self.__rmul__(other)\n \n @call_highest_priority('__mul__')\n", + "test_patch": "diff --git a/sympy/matrices/tests/test_commonmatrix.py b/sympy/matrices/tests/test_commonmatrix.py\n--- a/sympy/matrices/tests/test_commonmatrix.py\n+++ b/sympy/matrices/tests/test_commonmatrix.py\n@@ -674,6 +674,30 @@ def test_multiplication():\n assert c[1, 0] == 3*5\n assert c[1, 1] == 0\n \n+def test_matmul():\n+ a = Matrix([[1, 2], [3, 4]])\n+\n+ assert a.__matmul__(2) == NotImplemented\n+\n+ assert a.__rmatmul__(2) == NotImplemented\n+\n+ #This is done this way because @ is only supported in Python 3.5+\n+ #To check 2@a case\n+ try:\n+ eval('2 @ a')\n+ except SyntaxError:\n+ pass\n+ except TypeError: #TypeError is raised in case of NotImplemented is returned\n+ pass\n+\n+ #Check a@2 case\n+ try:\n+ eval('a @ 2')\n+ except SyntaxError:\n+ pass\n+ except TypeError: #TypeError is raised in case of NotImplemented is returned\n+ pass\n+\n def test_power():\n raises(NonSquareMatrixError, lambda: Matrix((1, 2))**2)\n \n", + "fail_to_pass": "[\"test_matmul\"]", + "pass_to_pass": "[\"test__MinimalMatrix\", \"test_vec\", \"test_tolist\", \"test_row_col_del\", \"test_get_diag_blocks1\", \"test_get_diag_blocks2\", \"test_shape\", \"test_reshape\", \"test_row_col\", \"test_row_join\", \"test_col_join\", \"test_row_insert\", \"test_col_insert\", \"test_extract\", \"test_hstack\", \"test_vstack\", \"test_atoms\", \"test_free_symbols\", \"test_has\", \"test_is_anti_symmetric\", \"test_diagonal_symmetrical\", \"test_is_hermitian\", \"test_is_Identity\", \"test_is_symbolic\", \"test_is_upper\", \"test_is_lower\", \"test_is_square\", \"test_is_symmetric\", \"test_is_hessenberg\", \"test_is_zero\", \"test_values\", \"test_adjoint\", \"test_as_real_imag\", \"test_conjugate\", \"test_doit\", \"test_evalf\", \"test_expand\", \"test_replace\", \"test_replace_map\", \"test_simplify\", \"test_subs\", \"test_trace\", \"test_xreplace\", \"test_permute\", \"test_abs\", \"test_add\", \"test_power\", \"test_neg\", \"test_sub\", \"test_det\", \"test_adjugate\", \"test_cofactor_and_minors\", \"test_charpoly\", \"test_row_op\", \"test_col_op\", \"test_is_echelon\", 
\"test_echelon_form\", \"test_rref\", \"test_eye\", \"test_ones\", \"test_zeros\", \"test_diag\", \"test_jordan_block\", \"test_columnspace\", \"test_rowspace\", \"test_nullspace\", \"test_eigenvals\", \"test_singular_values\", \"test_integrate\"]", + "expected_spans": { + "sympy/matrices/common.py": [ + "MatrixArithmetic.__matmul__", + "MatrixArithmetic.__rmatmul__" + ] + }, + "test_file_spans": { + "sympy/matrices/tests/test_commonmatrix.py": [ + "test_power" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-13895", + "repo": "sympy/sympy", + "base_commit": "4da0b64558e9551a11a99bccc63557ba34f50c58", + "problem_statement": "(-x/4 - S(1)/12)**x - 1 simplifies to an inequivalent expression\n >>> from sympy import *\r\n >>> x = Symbol('x')\r\n >>> e = (-x/4 - S(1)/12)**x - 1\r\n >>> e\r\n (-x/4 - 1/12)**x - 1\r\n >>> f = simplify(e)\r\n >>> f\r\n 12**(-x)*(-12**x + (-3*x - 1)**x)\r\n >>> a = S(9)/5\r\n >>> simplify(e.subs(x,a))\r\n -1 - 32*15**(1/5)*2**(2/5)/225\r\n >>> simplify(f.subs(x,a))\r\n -1 - 32*(-1)**(4/5)*60**(1/5)/225\r\n >>> N(e.subs(x,a))\r\n -1.32255049319339\r\n >>> N(f.subs(x,a))\r\n -0.739051169462523 - 0.189590423018741*I\r\n\r\n\n", + "golden_patch": "diff --git a/sympy/core/numbers.py b/sympy/core/numbers.py\n--- a/sympy/core/numbers.py\n+++ b/sympy/core/numbers.py\n@@ -2248,11 +2248,9 @@ def _eval_power(self, expt):\n if p is not False:\n dict = {p[0]: p[1]}\n else:\n- dict = Integer(self).factors(limit=2**15)\n+ dict = Integer(b_pos).factors(limit=2**15)\n \n # now process the dict of factors\n- if self.is_negative:\n- dict[-1] = 1\n out_int = 1 # integer part\n out_rad = 1 # extracted radicals\n sqr_int = 1\n@@ -2282,10 +2280,12 @@ def _eval_power(self, expt):\n break\n for k, v in sqr_dict.items():\n sqr_int *= k**(v//sqr_gcd)\n- if sqr_int == self and out_int == 1 and out_rad == 1:\n+ if sqr_int == b_pos and out_int == 1 and out_rad == 1:\n result = None\n else:\n result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q))\n+ if self.is_negative:\n+ result *= Pow(S.NegativeOne, expt)\n return result\n \n def _eval_is_prime(self):\n", + "test_patch": "diff --git a/sympy/core/tests/test_numbers.py b/sympy/core/tests/test_numbers.py\n--- a/sympy/core/tests/test_numbers.py\n+++ b/sympy/core/tests/test_numbers.py\n@@ -1021,6 +1021,12 @@ def test_powers_Integer():\n assert (-3) ** Rational(-2, 3) == \\\n -(-1)**Rational(1, 3)*3**Rational(1, 3)/3\n \n+ # negative base and rational power with some simplification\n+ assert (-8) ** Rational(2, 5) == \\\n+ 2*(-1)**Rational(2, 5)*2**Rational(1, 5)\n+ assert (-4) ** Rational(9, 5) == \\\n+ -8*(-1)**Rational(4, 5)*2**Rational(3, 5)\n+\n assert S(1234).factors() == {617: 1, 2: 1}\n assert Rational(2*3, 3*5*7).factors() == {2: 1, 5: -1, 7: -1}\n \n@@ -1194,6 +1200,14 @@ def test_issue_3449():\n assert sqrt(x - 1).subs(x, 5) == 2\n \n \n+def test_issue_13890():\n+ x = Symbol(\"x\")\n+ e = (-x/4 - S(1)/12)**x - 1\n+ f = simplify(e)\n+ a = S(9)/5\n+ assert abs(e.subs(x,a).evalf() - f.subs(x,a).evalf()) < 1e-15\n+\n+\n def test_Integer_factors():\n def F(i):\n return Integer(i).factors()\n", + "fail_to_pass": "[\"test_powers_Integer\", \"test_issue_13890\"]", + "pass_to_pass": "[\"test_integers_cache\", \"test_seterr\", \"test_mod\", \"test_divmod\", \"test_igcd\", \"test_igcd_lehmer\", \"test_igcd2\", \"test_ilcm\", \"test_igcdex\", \"test_Integer_new\", \"test_Rational_new\", \"test_Number_new\", \"test_Rational_cmp\", \"test_Float\", \"test_float_mpf\", \"test_Float_RealElement\", 
\"test_Float_default_to_highprec_from_str\", \"test_Float_eval\", \"test_Float_issue_2107\", \"test_Float_from_tuple\", \"test_Infinity\", \"test_Infinity_2\", \"test_Mul_Infinity_Zero\", \"test_Div_By_Zero\", \"test_Infinity_inequations\", \"test_NaN\", \"test_special_numbers\", \"test_powers\", \"test_integer_nthroot_overflow\", \"test_isqrt\", \"test_powers_Rational\", \"test_powers_Float\", \"test_abs1\", \"test_accept_int\", \"test_dont_accept_str\", \"test_int\", \"test_long\", \"test_real_bug\", \"test_bug_sqrt\", \"test_pi_Pi\", \"test_no_len\", \"test_issue_3321\", \"test_issue_3692\", \"test_issue_3423\", \"test_issue_3449\", \"test_Integer_factors\", \"test_Rational_factors\", \"test_issue_4107\", \"test_IntegerInteger\", \"test_Rational_gcd_lcm_cofactors\", \"test_Float_gcd_lcm_cofactors\", \"test_issue_4611\", \"test_conversion_to_mpmath\", \"test_relational\", \"test_Integer_as_index\", \"test_Rational_int\", \"test_zoo\", \"test_issue_4122\", \"test_GoldenRatio_expand\", \"test_as_content_primitive\", \"test_hashing_sympy_integers\", \"test_issue_4172\", \"test_Catalan_EulerGamma_prec\", \"test_Float_eq\", \"test_int_NumberSymbols\", \"test_issue_6640\", \"test_issue_6349\", \"test_mpf_norm\", \"test_latex\", \"test_issue_7742\", \"test_simplify_AlgebraicNumber\", \"test_Float_idempotence\", \"test_comp\", \"test_issue_9491\", \"test_issue_10063\", \"test_issue_10020\", \"test_invert_numbers\", \"test_mod_inverse\", \"test_golden_ratio_rewrite_as_sqrt\", \"test_comparisons_with_unknown_type\", \"test_NumberSymbol_comparison\", \"test_Integer_precision\"]", + "expected_spans": { + "sympy/core/numbers.py": [ + "Integer._eval_power" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_numbers.py": [ + "test_powers_Integer", + "test_Integer_factors" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-13915", + "repo": "sympy/sympy", + "base_commit": "5c1644ff85e15752f9f8721bc142bfbf975e7805", + "problem_statement": "Issue with a substitution that leads to an undefined expression\n```\r\nPython 3.6.4 |Anaconda custom (64-bit)| (default, Dec 21 2017, 15:39:08) \r\nType 'copyright', 'credits' or 'license' for more information\r\nIPython 6.2.1 -- An enhanced Interactive Python. Type '?' for help.\r\n\r\nIn [1]: from sympy import *\r\n\r\nIn [2]: a,b = symbols('a,b')\r\n\r\nIn [3]: r = (1/(a+b) + 1/(a-b))/(1/(a+b) - 1/(a-b))\r\n\r\nIn [4]: r.subs(b,a)\r\nOut[4]: 1\r\n\r\nIn [6]: import sympy\r\n\r\nIn [7]: sympy.__version__\r\nOut[7]: '1.1.1'\r\n```\r\n\r\nIf b is substituted by a, r is undefined. 
It is possible to calculate the limit\r\n`r.limit(b,a) # -1`\r\n\r\nBut whenever a subexpression of r is undefined, r itself is undefined.\n", + "golden_patch": "diff --git a/sympy/core/mul.py b/sympy/core/mul.py\n--- a/sympy/core/mul.py\n+++ b/sympy/core/mul.py\n@@ -423,6 +423,11 @@ def _gather(c_powers):\n changed = False\n for b, e in c_powers:\n if e.is_zero:\n+ # canceling out infinities yields NaN\n+ if (b.is_Add or b.is_Mul) and any(infty in b.args\n+ for infty in (S.ComplexInfinity, S.Infinity,\n+ S.NegativeInfinity)):\n+ return [S.NaN], [], None\n continue\n if e is S.One:\n if b.is_Number:\n", + "test_patch": "diff --git a/sympy/core/tests/test_arit.py b/sympy/core/tests/test_arit.py\n--- a/sympy/core/tests/test_arit.py\n+++ b/sympy/core/tests/test_arit.py\n@@ -1,7 +1,7 @@\n from __future__ import division\n \n from sympy import (Basic, Symbol, sin, cos, exp, sqrt, Rational, Float, re, pi,\n- sympify, Add, Mul, Pow, Mod, I, log, S, Max, symbols, oo, Integer,\n+ sympify, Add, Mul, Pow, Mod, I, log, S, Max, symbols, oo, zoo, Integer,\n sign, im, nan, Dummy, factorial, comp, refine\n )\n from sympy.core.compatibility import long, range\n@@ -1937,6 +1937,14 @@ def test_Mul_with_zero_infinite():\n assert e.is_positive is None\n assert e.is_hermitian is None\n \n+def test_Mul_does_not_cancel_infinities():\n+ a, b = symbols('a b')\n+ assert ((zoo + 3*a)/(3*a + zoo)) is nan\n+ assert ((b - oo)/(b - oo)) is nan\n+ # issue 13904\n+ expr = (1/(a+b) + 1/(a-b))/(1/(a+b) - 1/(a-b))\n+ assert expr.subs(b, a) is nan\n+\n def test_issue_8247_8354():\n from sympy import tan\n z = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3))\n", + "fail_to_pass": "[\"test_Mul_does_not_cancel_infinities\"]", + "pass_to_pass": "[\"test_bug1\", \"test_Symbol\", \"test_arit0\", \"test_pow2\", \"test_pow3\", \"test_mod_pow\", \"test_pow_E\", \"test_pow_issue_3516\", \"test_pow_im\", \"test_real_mul\", \"test_ncmul\", \"test_ncpow\", \"test_powerbug\", \"test_Mul_doesnt_expand_exp\", \"test_Add_Mul_is_integer\", \"test_Add_Mul_is_finite\", \"test_Mul_is_even_odd\", \"test_evenness_in_ternary_integer_product_with_even\", \"test_oddness_in_ternary_integer_product_with_even\", \"test_Mul_is_rational\", \"test_Add_is_rational\", \"test_Add_is_even_odd\", \"test_Mul_is_negative_positive\", \"test_Mul_is_negative_positive_2\", \"test_Mul_is_nonpositive_nonnegative\", \"test_Pow_is_zero\", \"test_Mul_hermitian_antihermitian\", \"test_Add_is_comparable\", \"test_Mul_is_comparable\", \"test_Pow_is_comparable\", \"test_Add_is_positive_2\", \"test_Add_is_irrational\", \"test_issue_3531b\", \"test_bug3\", \"test_suppressed_evaluation\", \"test_Add_as_coeff_mul\", \"test_Pow_as_coeff_mul_doesnt_expand\", \"test_issue_3514\", \"test_make_args\", \"test_issue_5126\", \"test_Rational_as_content_primitive\", \"test_Add_as_content_primitive\", \"test_Mul_as_content_primitive\", \"test_Pow_as_content_primitive\", \"test_issue_5460\", \"test_product_irrational\", \"test_issue_5919\", \"test_Mod_is_integer\", \"test_issue_6001\", \"test_polar\", \"test_issue_6040\", \"test_issue_6082\", \"test_issue_6077\", \"test_mul_flatten_oo\", \"test_add_flatten\", \"test_issue_5160_6087_6089_6090\", \"test_float_int\", \"test_issue_6611a\", \"test_denest_add_mul\", \"test_mul_zero_detection\", \"test_Mul_with_zero_infinite\", \"test_issue_8247_8354\"]", + "expected_spans": { + "sympy/core/mul.py": [ + "Mul.flatten" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_arit.py": [ + "imports", + "test_issue_8247_8354" + ] + }, + 
"resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-10508", + "repo": "scikit-learn/scikit-learn", + "base_commit": "c753b77ac49e72ebc0fe5e3c2369fe628f975017", + "problem_statement": "LabelEncoder transform fails for empty lists (for certain inputs)\nPython 3.6.3, scikit_learn 0.19.1\r\n\r\nDepending on which datatypes were used to fit the LabelEncoder, transforming empty lists works or not. Expected behavior would be that empty arrays are returned in both cases.\r\n\r\n```python\r\n>>> from sklearn.preprocessing import LabelEncoder\r\n>>> le = LabelEncoder()\r\n>>> le.fit([1,2])\r\nLabelEncoder()\r\n>>> le.transform([])\r\narray([], dtype=int64)\r\n>>> le.fit([\"a\",\"b\"])\r\nLabelEncoder()\r\n>>> le.transform([])\r\nTraceback (most recent call last):\r\n File \"[...]\\Python36\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 57, in _wrapfunc\r\n return getattr(obj, method)(*args, **kwds)\r\nTypeError: Cannot cast array data from dtype('float64') to dtype('\", line 1, in \r\n File \"[...]\\Python36\\lib\\site-packages\\sklearn\\preprocessing\\label.py\", line 134, in transform\r\n return np.searchsorted(self.classes_, y)\r\n File \"[...]\\Python36\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 1075, in searchsorted\r\n return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)\r\n File \"[...]\\Python36\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 67, in _wrapfunc\r\n return _wrapit(obj, method, *args, **kwds)\r\n File \"[...]\\Python36\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 47, in _wrapit\r\n result = getattr(asarray(obj), method)(*args, **kwds)\r\nTypeError: Cannot cast array data from dtype('float64') to dtype('>> a = Symbol('a', integer=True, positive=True)\r\n>>> e = (-a)**x * a**(-x)\r\n>>> f = simplify(e)\r\n>>> print(e)\r\na**(-x)*(-a)**x\r\n>>> print(f)\r\n(-1)**x\r\n>>> t = -S(10)/3\r\n>>> n1 = e.subs(x,t)\r\n>>> n2 = f.subs(x,t)\r\n>>> print(N(n1))\r\n-0.5 + 0.866025403784439*I\r\n>>> print(N(n2))\r\n-0.5 + 0.866025403784439*I\r\n```\r\n\r\nvs\r\n\r\n```\r\n>>> a = S(2)\r\n>>> e = (-a)**x * a**(-x)\r\n>>> f = simplify(e)\r\n>>> print(e)\r\n(-2)**x*2**(-x)\r\n>>> print(f)\r\n(-1)**x\r\n>>> t = -S(10)/3\r\n>>> n1 = e.subs(x,t)\r\n>>> n2 = f.subs(x,t)\r\n>>> print(N(n1))\r\n0.5 - 0.866025403784439*I\r\n>>> print(N(n2))\r\n-0.5 + 0.866025403784439*I\r\n```\n", + "golden_patch": "diff --git a/sympy/core/numbers.py b/sympy/core/numbers.py\n--- a/sympy/core/numbers.py\n+++ b/sympy/core/numbers.py\n@@ -1678,11 +1678,7 @@ def _eval_power(self, expt):\n if (ne is S.One):\n return Rational(self.q, self.p)\n if self.is_negative:\n- if expt.q != 1:\n- return -(S.NegativeOne)**((expt.p % expt.q) /\n- S(expt.q))*Rational(self.q, -self.p)**ne\n- else:\n- return S.NegativeOne**ne*Rational(self.q, -self.p)**ne\n+ return S.NegativeOne**expt*Rational(self.q, -self.p)**ne\n else:\n return Rational(self.q, self.p)**ne\n if expt is S.Infinity: # -oo already caught by test for negative\n@@ -2223,11 +2219,7 @@ def _eval_power(self, expt):\n # invert base and change sign on exponent\n ne = -expt\n if self.is_negative:\n- if expt.q != 1:\n- return -(S.NegativeOne)**((expt.p % expt.q) /\n- S(expt.q))*Rational(1, -self)**ne\n- else:\n- return (S.NegativeOne)**ne*Rational(1, -self)**ne\n+ return S.NegativeOne**expt*Rational(1, -self)**ne\n else:\n return Rational(1, self.p)**ne\n # see if base is a perfect root, sqrt(4) --> 2\n", + "test_patch": "diff --git a/sympy/core/tests/test_numbers.py 
b/sympy/core/tests/test_numbers.py\n--- a/sympy/core/tests/test_numbers.py\n+++ b/sympy/core/tests/test_numbers.py\n@@ -1041,6 +1041,10 @@ def test_powers_Integer():\n -(-1)**Rational(2, 3)*3**Rational(2, 3)/27\n assert (-3) ** Rational(-2, 3) == \\\n -(-1)**Rational(1, 3)*3**Rational(1, 3)/3\n+ assert (-2) ** Rational(-10, 3) == \\\n+ (-1)**Rational(2, 3)*2**Rational(2, 3)/16\n+ assert abs(Pow(-2, Rational(-10, 3)).n() -\n+ Pow(-2, Rational(-10, 3), evaluate=False).n()) < 1e-16\n \n # negative base and rational power with some simplification\n assert (-8) ** Rational(2, 5) == \\\n@@ -1121,6 +1125,10 @@ def test_powers_Rational():\n -4*(-1)**Rational(2, 3)*2**Rational(1, 3)*3**Rational(2, 3)/27\n assert Rational(-3, 2)**Rational(-2, 3) == \\\n -(-1)**Rational(1, 3)*2**Rational(2, 3)*3**Rational(1, 3)/3\n+ assert Rational(-3, 2)**Rational(-10, 3) == \\\n+ 8*(-1)**Rational(2, 3)*2**Rational(1, 3)*3**Rational(2, 3)/81\n+ assert abs(Pow(Rational(-2, 3), Rational(-7, 4)).n() -\n+ Pow(Rational(-2, 3), Rational(-7, 4), evaluate=False).n()) < 1e-16\n \n # negative integer power and negative rational base\n assert Rational(-2, 3) ** Rational(-2, 1) == Rational(9, 4)\n", + "fail_to_pass": "[\"test_powers_Integer\", \"test_powers_Rational\"]", + "pass_to_pass": "[\"test_integers_cache\", \"test_seterr\", \"test_mod\", \"test_divmod\", \"test_igcd\", \"test_igcd_lehmer\", \"test_igcd2\", \"test_ilcm\", \"test_igcdex\", \"test_Integer_new\", \"test_Rational_new\", \"test_Number_new\", \"test_Rational_cmp\", \"test_Float\", \"test_float_mpf\", \"test_Float_RealElement\", \"test_Float_default_to_highprec_from_str\", \"test_Float_eval\", \"test_Float_issue_2107\", \"test_Float_from_tuple\", \"test_Infinity\", \"test_Infinity_2\", \"test_Mul_Infinity_Zero\", \"test_Div_By_Zero\", \"test_Infinity_inequations\", \"test_NaN\", \"test_special_numbers\", \"test_powers\", \"test_integer_nthroot_overflow\", \"test_integer_log\", \"test_isqrt\", \"test_powers_Float\", \"test_abs1\", \"test_accept_int\", \"test_dont_accept_str\", \"test_int\", \"test_long\", \"test_real_bug\", \"test_bug_sqrt\", \"test_pi_Pi\", \"test_no_len\", \"test_issue_3321\", \"test_issue_3692\", \"test_issue_3423\", \"test_issue_3449\", \"test_issue_13890\", \"test_Integer_factors\", \"test_Rational_factors\", \"test_issue_4107\", \"test_IntegerInteger\", \"test_Rational_gcd_lcm_cofactors\", \"test_Float_gcd_lcm_cofactors\", \"test_issue_4611\", \"test_conversion_to_mpmath\", \"test_relational\", \"test_Integer_as_index\", \"test_Rational_int\", \"test_zoo\", \"test_issue_4122\", \"test_GoldenRatio_expand\", \"test_as_content_primitive\", \"test_hashing_sympy_integers\", \"test_issue_4172\", \"test_Catalan_EulerGamma_prec\", \"test_Float_eq\", \"test_int_NumberSymbols\", \"test_issue_6640\", \"test_issue_6349\", \"test_mpf_norm\", \"test_latex\", \"test_issue_7742\", \"test_simplify_AlgebraicNumber\", \"test_Float_idempotence\", \"test_comp\", \"test_issue_9491\", \"test_issue_10063\", \"test_issue_10020\", \"test_invert_numbers\", \"test_mod_inverse\", \"test_golden_ratio_rewrite_as_sqrt\", \"test_comparisons_with_unknown_type\", \"test_NumberSymbol_comparison\", \"test_Integer_precision\"]", + "expected_spans": { + "sympy/core/numbers.py": [ + "Rational._eval_power", + "Integer._eval_power" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_numbers.py": [ + "test_powers_Integer", + "test_powers_Rational" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-14308", + "repo": "sympy/sympy", + 
"base_commit": "fb536869fb7aa28b2695ad7a3b70949926b291c4", + "problem_statement": "vectors break pretty printing\n```py\r\nIn [1]: from sympy.vector import *\r\n\r\nIn [2]: e = CoordSysCartesian('e')\r\n\r\nIn [3]: (x/y)**t*e.j\r\nOut[3]:\r\n\u239b t\u239e e_j\r\n\u239c\u239bx\u239e e_j \u239f\r\n\u239c\u239c\u2500\u239f \u239f\r\n\u239d\u239dy\u23a0 \u23a0\r\n```\r\n\r\nAlso, when it does print correctly, the baseline is wrong (it should be centered). \n", + "golden_patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -931,26 +931,49 @@ def _print_BasisDependent(self, expr):\n #Fixing the newlines\n lengths = []\n strs = ['']\n+ flag = []\n for i, partstr in enumerate(o1):\n+ flag.append(0)\n # XXX: What is this hack?\n if '\\n' in partstr:\n tempstr = partstr\n tempstr = tempstr.replace(vectstrs[i], '')\n- tempstr = tempstr.replace(u'\\N{RIGHT PARENTHESIS UPPER HOOK}',\n- u'\\N{RIGHT PARENTHESIS UPPER HOOK}'\n- + ' ' + vectstrs[i])\n+ if u'\\N{right parenthesis extension}' in tempstr: # If scalar is a fraction\n+ for paren in range(len(tempstr)):\n+ flag[i] = 1\n+ if tempstr[paren] == u'\\N{right parenthesis extension}':\n+ tempstr = tempstr[:paren] + u'\\N{right parenthesis extension}'\\\n+ + ' ' + vectstrs[i] + tempstr[paren + 1:]\n+ break\n+ elif u'\\N{RIGHT PARENTHESIS LOWER HOOK}' in tempstr:\n+ flag[i] = 1\n+ tempstr = tempstr.replace(u'\\N{RIGHT PARENTHESIS LOWER HOOK}',\n+ u'\\N{RIGHT PARENTHESIS LOWER HOOK}'\n+ + ' ' + vectstrs[i])\n+ else:\n+ tempstr = tempstr.replace(u'\\N{RIGHT PARENTHESIS UPPER HOOK}',\n+ u'\\N{RIGHT PARENTHESIS UPPER HOOK}'\n+ + ' ' + vectstrs[i])\n o1[i] = tempstr\n+\n o1 = [x.split('\\n') for x in o1]\n- n_newlines = max([len(x) for x in o1])\n- for parts in o1:\n- lengths.append(len(parts[0]))\n+ n_newlines = max([len(x) for x in o1]) # Width of part in its pretty form\n+\n+ if 1 in flag: # If there was a fractional scalar\n+ for i, parts in enumerate(o1):\n+ if len(parts) == 1: # If part has no newline\n+ parts.insert(0, ' ' * (len(parts[0])))\n+ flag[i] = 1\n+\n+ for i, parts in enumerate(o1):\n+ lengths.append(len(parts[flag[i]]))\n for j in range(n_newlines):\n if j+1 <= len(parts):\n if j >= len(strs):\n strs.append(' ' * (sum(lengths[:-1]) +\n 3*(len(lengths)-1)))\n- if j == 0:\n- strs[0] += parts[0] + ' + '\n+ if j == flag[i]:\n+ strs[flag[i]] += parts[flag[i]] + ' + '\n else:\n strs[j] += parts[j] + ' '*(lengths[-1] -\n len(parts[j])+\n", + "test_patch": "diff --git a/sympy/printing/pretty/tests/test_pretty.py b/sympy/printing/pretty/tests/test_pretty.py\n--- a/sympy/printing/pretty/tests/test_pretty.py\n+++ b/sympy/printing/pretty/tests/test_pretty.py\n@@ -6089,6 +6089,28 @@ def test_MatrixElement_printing():\n assert upretty(F) == ucode_str1\n \n \n+def test_issue_12675():\n+ from sympy.vector import CoordSys3D\n+ x, y, t, j = symbols('x y t j')\n+ e = CoordSys3D('e')\n+\n+ ucode_str = \\\n+u(\"\"\"\\\n+\u239b t\u239e \\n\\\n+\u239c\u239bx\u239e \u239f e_j\\n\\\n+\u239c\u239c\u2500\u239f \u239f \\n\\\n+\u239d\u239dy\u23a0 \u23a0 \\\n+\"\"\")\n+ assert upretty((x/y)**t*e.j) == ucode_str\n+ ucode_str = \\\n+u(\"\"\"\\\n+\u239b1\u239e \\n\\\n+\u239c\u2500\u239f e_j\\n\\\n+\u239dy\u23a0 \\\n+\"\"\")\n+ assert upretty((1/y)*e.j) == ucode_str\n+\n+\n def test_MatrixSymbol_printing():\n # test cases for issue #14237\n A = MatrixSymbol(\"A\", 3, 3)\ndiff --git a/sympy/vector/tests/test_printing.py 
b/sympy/vector/tests/test_printing.py\n--- a/sympy/vector/tests/test_printing.py\n+++ b/sympy/vector/tests/test_printing.py\n@@ -37,8 +37,8 @@ def upretty(expr):\n v.append(N.j - (Integral(f(b)) - C.x**2)*N.k)\n upretty_v_8 = u(\n \"\"\"\\\n-N_j + \u239b 2 \u2320 \u239e N_k\\n\\\n- \u239cC_x - \u23ae f(b) db\u239f \\n\\\n+ \u239b 2 \u2320 \u239e \\n\\\n+N_j + \u239cC_x - \u23ae f(b) db\u239f N_k\\n\\\n \u239d \u2321 \u23a0 \\\n \"\"\")\n pretty_v_8 = u(\n@@ -55,9 +55,9 @@ def upretty(expr):\n v.append((a**2 + b)*N.i + (Integral(f(b)))*N.k)\n upretty_v_11 = u(\n \"\"\"\\\n-\u239b 2 \u239e N_i + \u239b\u2320 \u239e N_k\\n\\\n-\u239da + b\u23a0 \u239c\u23ae f(b) db\u239f \\n\\\n- \u239d\u2321 \u23a0 \\\n+\u239b 2 \u239e \u239b\u2320 \u239e \\n\\\n+\u239da + b\u23a0 N_i + \u239c\u23ae f(b) db\u239f N_k\\n\\\n+ \u239d\u2321 \u23a0 \\\n \"\"\")\n pretty_v_11 = u(\n \"\"\"\\\n@@ -85,8 +85,8 @@ def upretty(expr):\n # This is the pretty form for ((a**2 + b)*N.i + 3*(C.y - c)*N.k) | N.k\n upretty_d_7 = u(\n \"\"\"\\\n-\u239b 2 \u239e (N_i|N_k) + (3\u22c5C_y - 3\u22c5c) (N_k|N_k)\\n\\\n-\u239da + b\u23a0 \\\n+\u239b 2 \u239e \\n\\\n+\u239da + b\u23a0 (N_i|N_k) + (3\u22c5C_y - 3\u22c5c) (N_k|N_k)\\\n \"\"\")\n pretty_d_7 = u(\n \"\"\"\\\n", + "fail_to_pass": "[\"test_issue_12675\", \"test_pretty_print_unicode\"]", + "pass_to_pass": "[\"test_pretty_ascii_str\", \"test_pretty_unicode_str\", \"test_upretty_greek\", \"test_upretty_multiindex\", \"test_upretty_sub_super\", \"test_upretty_subs_missing_in_24\", \"test_upretty_modifiers\", \"test_pretty_Cycle\", \"test_pretty_basic\", \"test_negative_fractions\", \"test_issue_5524\", \"test_pretty_ordering\", \"test_EulerGamma\", \"test_GoldenRatio\", \"test_pretty_relational\", \"test_Assignment\", \"test_AugmentedAssignment\", \"test_issue_7117\", \"test_pretty_rational\", \"test_pretty_functions\", \"test_pretty_sqrt\", \"test_pretty_sqrt_char_knob\", \"test_pretty_sqrt_longsymbol_no_sqrt_char\", \"test_pretty_KroneckerDelta\", \"test_pretty_product\", \"test_pretty_lambda\", \"test_pretty_order\", \"test_pretty_derivatives\", \"test_pretty_integrals\", \"test_pretty_matrix\", \"test_pretty_ndim_arrays\", \"test_tensor_TensorProduct\", \"test_diffgeom_print_WedgeProduct\", \"test_Adjoint\", \"test_pretty_Trace_issue_9044\", \"test_MatrixExpressions\", \"test_pretty_dotproduct\", \"test_pretty_piecewise\", \"test_pretty_ITE\", \"test_pretty_seq\", \"test_any_object_in_sequence\", \"test_print_builtin_set\", \"test_pretty_sets\", \"test_pretty_SetExpr\", \"test_pretty_ImageSet\", \"test_pretty_ConditionSet\", \"test_pretty_ComplexRegion\", \"test_pretty_Union_issue_10414\", \"test_pretty_Intersection_issue_10414\", \"test_ProductSet_paranthesis\", \"test_ProductSet_prod_char_issue_10413\", \"test_pretty_sequences\", \"test_pretty_FourierSeries\", \"test_pretty_FormalPowerSeries\", \"test_pretty_limits\", \"test_pretty_ComplexRootOf\", \"test_pretty_RootSum\", \"test_GroebnerBasis\", \"test_pretty_Boolean\", \"test_pretty_Domain\", \"test_pretty_prec\", \"test_pprint\", \"test_pretty_class\", \"test_pretty_no_wrap_line\", \"test_settings\", \"test_pretty_sum\", \"test_units\", \"test_pretty_Subs\", \"test_gammas\", \"test_beta\", \"test_function_subclass_different_name\", \"test_SingularityFunction\", \"test_deltas\", \"test_hyper\", \"test_meijerg\", \"test_noncommutative\", \"test_pretty_special_functions\", \"test_expint\", \"test_elliptic_functions\", \"test_RandomDomain\", \"test_PrettyPoly\", \"test_issue_6285\", \"test_issue_6359\", 
\"test_issue_6739\", \"test_complicated_symbol_unchanged\", \"test_categories\", \"test_PrettyModules\", \"test_QuotientRing\", \"test_Homomorphism\", \"test_Tr\", \"test_pretty_Add\", \"test_issue_7179\", \"test_issue_7180\", \"test_pretty_Complement\", \"test_pretty_SymmetricDifference\", \"test_pretty_Contains\", \"test_issue_4335\", \"test_issue_6324\", \"test_issue_7927\", \"test_issue_6134\", \"test_issue_9877\", \"test_issue_13651\", \"test_pretty_primenu\", \"test_pretty_primeomega\", \"test_pretty_Mod\", \"test_issue_11801\", \"test_pretty_UnevaluatedExpr\", \"test_issue_10472\", \"test_MatrixElement_printing\", \"test_MatrixSymbol_printing\", \"test_degree_printing\", \"test_str_printing\", \"test_latex_printing\"]", + "expected_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_BasisDependent" + ] + }, + "test_file_spans": { + "sympy/printing/pretty/tests/test_pretty.py": [ + "test_MatrixSymbol_printing" + ], + "sympy/vector/tests/test_printing.py": [ + "impl", + "impl:39" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-14317", + "repo": "sympy/sympy", + "base_commit": "fb536869fb7aa28b2695ad7a3b70949926b291c4", + "problem_statement": "LaTeX printer does not use the same order of monomials as pretty and str \nWhen printing a Poly, the str and pretty printers use the logical order of monomials, from highest to lowest degrees. But latex printer does not. \r\n```\r\n>>> var('a b c x')\r\n>>> p = Poly([a, 1, b, 2, c, 3], x)\r\n>>> p\r\nPoly(a*x**5 + x**4 + b*x**3 + 2*x**2 + c*x + 3, x, domain='ZZ[a,b,c]')\r\n>>> pretty(p)\r\n\"Poly(a*x**5 + x**4 + b*x**3 + 2*x**2 + c*x + 3, x, domain='ZZ[a,b,c]')\"\r\n>>> latex(p)\r\n'\\\\operatorname{Poly}{\\\\left( a x^{5} + b x^{3} + c x + x^{4} + 2 x^{2} + 3, x, domain=\\\\mathbb{Z}\\\\left[a, b, c\\\\right] \\\\right)}'\r\n```\n", + "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -1813,7 +1813,50 @@ def _print_PolynomialRingBase(self, expr):\n \n def _print_Poly(self, poly):\n cls = poly.__class__.__name__\n- expr = self._print(poly.as_expr())\n+ terms = []\n+ for monom, coeff in poly.terms():\n+ s_monom = ''\n+ for i, exp in enumerate(monom):\n+ if exp > 0:\n+ if exp == 1:\n+ s_monom += self._print(poly.gens[i])\n+ else:\n+ s_monom += self._print(pow(poly.gens[i], exp))\n+\n+ if coeff.is_Add:\n+ if s_monom:\n+ s_coeff = r\"\\left(%s\\right)\" % self._print(coeff)\n+ else:\n+ s_coeff = self._print(coeff)\n+ else:\n+ if s_monom:\n+ if coeff is S.One:\n+ terms.extend(['+', s_monom])\n+ continue\n+\n+ if coeff is S.NegativeOne:\n+ terms.extend(['-', s_monom])\n+ continue\n+\n+ s_coeff = self._print(coeff)\n+\n+ if not s_monom:\n+ s_term = s_coeff\n+ else:\n+ s_term = s_coeff + \" \" + s_monom\n+\n+ if s_term.startswith('-'):\n+ terms.extend(['-', s_term[1:]])\n+ else:\n+ terms.extend(['+', s_term])\n+\n+ if terms[0] in ['-', '+']:\n+ modifier = terms.pop(0)\n+\n+ if modifier == '-':\n+ terms[0] = '-' + terms[0]\n+\n+ expr = ' '.join(terms)\n gens = list(map(self._print, poly.gens))\n domain = \"domain=%s\" % self._print(poly.get_domain())\n \n", + "test_patch": "diff --git a/sympy/printing/tests/test_latex.py b/sympy/printing/tests/test_latex.py\n--- a/sympy/printing/tests/test_latex.py\n+++ b/sympy/printing/tests/test_latex.py\n@@ -1132,11 +1132,20 @@ def test_latex_Poly():\n assert latex(Poly(x**2 + 2 * x, x)) == \\\n r\"\\operatorname{Poly}{\\left( x^{2} + 2 x, x, 
domain=\\mathbb{Z} \\right)}\"\n assert latex(Poly(x/y, x)) == \\\n- r\"\\operatorname{Poly}{\\left( \\frac{x}{y}, x, domain=\\mathbb{Z}\\left(y\\right) \\right)}\"\n+ r\"\\operatorname{Poly}{\\left( \\frac{1}{y} x, x, domain=\\mathbb{Z}\\left(y\\right) \\right)}\"\n assert latex(Poly(2.0*x + y)) == \\\n r\"\\operatorname{Poly}{\\left( 2.0 x + 1.0 y, x, y, domain=\\mathbb{R} \\right)}\"\n \n \n+def test_latex_Poly_order():\n+ assert latex(Poly([a, 1, b, 2, c, 3], x)) == \\\n+ '\\\\operatorname{Poly}{\\\\left( a x^{5} + x^{4} + b x^{3} + 2 x^{2} + c x + 3, x, domain=\\\\mathbb{Z}\\\\left[a, b, c\\\\right] \\\\right)}'\n+ assert latex(Poly([a, 1, b+c, 2, 3], x)) == \\\n+ '\\\\operatorname{Poly}{\\\\left( a x^{4} + x^{3} + \\\\left(b + c\\\\right) x^{2} + 2 x + 3, x, domain=\\\\mathbb{Z}\\\\left[a, b, c\\\\right] \\\\right)}'\n+ assert latex(Poly(a*x**3 + x**2*y - x*y - c*y**3 - b*x*y**2 + y - a*x + b, (x, y))) == \\\n+ '\\\\operatorname{Poly}{\\\\left( a x^{3} + x^{2}y - b xy^{2} - xy - a x - c y^{3} + y + b, x, y, domain=\\\\mathbb{Z}\\\\left[a, b, c\\\\right] \\\\right)}'\n+\n+\n def test_latex_ComplexRootOf():\n assert latex(rootof(x**5 + x + 3, 0)) == \\\n r\"\\operatorname{CRootOf} {\\left(x^{5} + x + 3, 0\\right)}\"\n", + "fail_to_pass": "[\"test_latex_Poly\", \"test_latex_Poly_order\"]", + "pass_to_pass": "[\"test_printmethod\", \"test_latex_basic\", \"test_latex_builtins\", \"test_latex_SingularityFunction\", \"test_latex_cycle\", \"test_latex_permutation\", \"test_latex_Float\", \"test_latex_vector_expressions\", \"test_latex_symbols\", \"test_latex_functions\", \"test_function_subclass_different_name\", \"test_hyper_printing\", \"test_latex_bessel\", \"test_latex_fresnel\", \"test_latex_brackets\", \"test_latex_indexed\", \"test_latex_derivatives\", \"test_latex_subs\", \"test_latex_integrals\", \"test_latex_sets\", \"test_latex_SetExpr\", \"test_latex_Range\", \"test_latex_sequences\", \"test_latex_FourierSeries\", \"test_latex_FormalPowerSeries\", \"test_latex_intervals\", \"test_latex_AccumuBounds\", \"test_latex_emptyset\", \"test_latex_commutator\", \"test_latex_union\", \"test_latex_symmetric_difference\", \"test_latex_Complement\", \"test_latex_Complexes\", \"test_latex_productset\", \"test_latex_Naturals\", \"test_latex_Naturals0\", \"test_latex_Integers\", \"test_latex_ImageSet\", \"test_latex_ConditionSet\", \"test_latex_ComplexRegion\", \"test_latex_Contains\", \"test_latex_sum\", \"test_latex_product\", \"test_latex_limits\", \"test_latex_log\", \"test_issue_3568\", \"test_latex\", \"test_latex_dict\", \"test_latex_list\", \"test_latex_rational\", \"test_latex_inverse\", \"test_latex_DiracDelta\", \"test_latex_Heaviside\", \"test_latex_KroneckerDelta\", \"test_latex_LeviCivita\", \"test_mode\", \"test_latex_Piecewise\", \"test_latex_Matrix\", \"test_latex_matrix_with_functions\", \"test_latex_NDimArray\", \"test_latex_mul_symbol\", \"test_latex_issue_4381\", \"test_latex_issue_4576\", \"test_latex_pow_fraction\", \"test_noncommutative\", \"test_latex_order\", \"test_latex_Lambda\", \"test_latex_PolyElement\", \"test_latex_FracElement\", \"test_latex_ComplexRootOf\", \"test_latex_RootSum\", \"test_settings\", \"test_latex_numbers\", \"test_latex_euler\", \"test_lamda\", \"test_custom_symbol_names\", \"test_matAdd\", \"test_matMul\", \"test_latex_MatrixSlice\", \"test_latex_RandomDomain\", \"test_PrettyPoly\", \"test_integral_transforms\", \"test_PolynomialRingBase\", \"test_categories\", \"test_Modules\", \"test_QuotientRing\", \"test_Tr\", \"test_Adjoint\", 
\"test_Hadamard\", \"test_ZeroMatrix\", \"test_boolean_args_order\", \"test_imaginary\", \"test_builtins_without_args\", \"test_latex_greek_functions\", \"test_translate\", \"test_other_symbols\", \"test_modifiers\", \"test_greek_symbols\", \"test_builtin_no_args\", \"test_issue_6853\", \"test_Mul\", \"test_Pow\", \"test_issue_7180\", \"test_issue_8409\", \"test_issue_7117\", \"test_issue_2934\", \"test_issue_10489\", \"test_issue_12886\", \"test_issue_13651\", \"test_latex_UnevaluatedExpr\", \"test_MatrixElement_printing\", \"test_MatrixSymbol_printing\", \"test_Quaternion_latex_printing\", \"test_TensorProduct_printing\", \"test_WedgeProduct_printing\", \"test_units\"]", + "expected_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_FourierSeries" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_latex.py": [ + "test_latex_FracElement" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-14396", + "repo": "sympy/sympy", + "base_commit": "f35ad6411f86a15dd78db39c29d1e5291f66f9b5", + "problem_statement": "Poly(domain='RR[y,z]') doesn't work\n``` py\nIn [14]: Poly(1.2*x*y*z, x)\nOut[14]: Poly(1.2*y*z*x, x, domain='RR[y,z]')\n\nIn [15]: Poly(1.2*x*y*z, x, domain='RR[y,z]')\n---------------------------------------------------------------------------\nOptionError Traceback (most recent call last)\n in ()\n----> 1 Poly(1.2*x*y*z, x, domain='RR[y,z]')\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polytools.py in __new__(cls, rep, *gens, **args)\n 69 def __new__(cls, rep, *gens, **args):\n 70 \"\"\"Create a new polynomial instance out of something useful. \"\"\"\n---> 71 opt = options.build_options(gens, args)\n 72\n 73 if 'order' in opt:\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polyoptions.py in build_options(gens, args)\n 718\n 719 if len(args) != 1 or 'opt' not in args or gens:\n--> 720 return Options(gens, args)\n 721 else:\n 722 return args['opt']\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polyoptions.py in __init__(self, gens, args, flags, strict)\n 151 self[option] = cls.preprocess(value)\n 152\n--> 153 preprocess_options(args)\n 154\n 155 for key, value in dict(defaults).items():\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polyoptions.py in preprocess_options(args)\n 149\n 150 if value is not None:\n--> 151 self[option] = cls.preprocess(value)\n 152\n 153 preprocess_options(args)\n\n/Users/aaronmeurer/Documents/Python/sympy/sympy-scratch/sympy/polys/polyoptions.py in preprocess(cls, domain)\n 480 return sympy.polys.domains.QQ.algebraic_field(*gens)\n 481\n--> 482 raise OptionError('expected a valid domain specification, got %s' % domain)\n 483\n 484 @classmethod\n\nOptionError: expected a valid domain specification, got RR[y,z]\n```\n\nAlso, the wording of error message could be improved\n\n", + "golden_patch": "diff --git a/sympy/polys/polyoptions.py b/sympy/polys/polyoptions.py\n--- a/sympy/polys/polyoptions.py\n+++ b/sympy/polys/polyoptions.py\n@@ -405,7 +405,7 @@ class Domain(with_metaclass(OptionType, Option)):\n _re_realfield = re.compile(r\"^(R|RR)(_(\\d+))?$\")\n _re_complexfield = re.compile(r\"^(C|CC)(_(\\d+))?$\")\n _re_finitefield = re.compile(r\"^(FF|GF)\\((\\d+)\\)$\")\n- _re_polynomial = re.compile(r\"^(Z|ZZ|Q|QQ)\\[(.+)\\]$\")\n+ _re_polynomial = re.compile(r\"^(Z|ZZ|Q|QQ|R|RR|C|CC)\\[(.+)\\]$\")\n _re_fraction = re.compile(r\"^(Z|ZZ|Q|QQ)\\((.+)\\)$\")\n _re_algebraic = 
re.compile(r\"^(Q|QQ)\\<(.+)\\>$\")\n \n@@ -459,8 +459,12 @@ def preprocess(cls, domain):\n \n if ground in ['Z', 'ZZ']:\n return sympy.polys.domains.ZZ.poly_ring(*gens)\n- else:\n+ elif ground in ['Q', 'QQ']:\n return sympy.polys.domains.QQ.poly_ring(*gens)\n+ elif ground in ['R', 'RR']:\n+ return sympy.polys.domains.RR.poly_ring(*gens)\n+ else:\n+ return sympy.polys.domains.CC.poly_ring(*gens)\n \n r = cls._re_fraction.match(domain)\n \n", + "test_patch": "diff --git a/sympy/polys/tests/test_polyoptions.py b/sympy/polys/tests/test_polyoptions.py\n--- a/sympy/polys/tests/test_polyoptions.py\n+++ b/sympy/polys/tests/test_polyoptions.py\n@@ -6,7 +6,7 @@\n Frac, Formal, Polys, Include, All, Gen, Symbols, Method)\n \n from sympy.polys.orderings import lex\n-from sympy.polys.domains import FF, GF, ZZ, QQ, EX\n+from sympy.polys.domains import FF, GF, ZZ, QQ, RR, CC, EX\n \n from sympy.polys.polyerrors import OptionError, GeneratorsError\n \n@@ -176,15 +176,23 @@ def test_Domain_preprocess():\n \n assert Domain.preprocess('Z[x]') == ZZ[x]\n assert Domain.preprocess('Q[x]') == QQ[x]\n+ assert Domain.preprocess('R[x]') == RR[x]\n+ assert Domain.preprocess('C[x]') == CC[x]\n \n assert Domain.preprocess('ZZ[x]') == ZZ[x]\n assert Domain.preprocess('QQ[x]') == QQ[x]\n+ assert Domain.preprocess('RR[x]') == RR[x]\n+ assert Domain.preprocess('CC[x]') == CC[x]\n \n assert Domain.preprocess('Z[x,y]') == ZZ[x, y]\n assert Domain.preprocess('Q[x,y]') == QQ[x, y]\n+ assert Domain.preprocess('R[x,y]') == RR[x, y]\n+ assert Domain.preprocess('C[x,y]') == CC[x, y]\n \n assert Domain.preprocess('ZZ[x,y]') == ZZ[x, y]\n assert Domain.preprocess('QQ[x,y]') == QQ[x, y]\n+ assert Domain.preprocess('RR[x,y]') == RR[x, y]\n+ assert Domain.preprocess('CC[x,y]') == CC[x, y]\n \n raises(OptionError, lambda: Domain.preprocess('Z()'))\n \n", + "fail_to_pass": "[\"test_Domain_preprocess\"]", + "pass_to_pass": "[\"test_Options_clone\", \"test_Expand_preprocess\", \"test_Expand_postprocess\", \"test_Gens_preprocess\", \"test_Gens_postprocess\", \"test_Wrt_preprocess\", \"test_Wrt_postprocess\", \"test_Sort_preprocess\", \"test_Sort_postprocess\", \"test_Order_preprocess\", \"test_Order_postprocess\", \"test_Field_preprocess\", \"test_Field_postprocess\", \"test_Greedy_preprocess\", \"test_Greedy_postprocess\", \"test_Domain_postprocess\", \"test_Split_preprocess\", \"test_Split_postprocess\", \"test_Gaussian_preprocess\", \"test_Gaussian_postprocess\", \"test_Extension_preprocess\", \"test_Extension_postprocess\", \"test_Modulus_preprocess\", \"test_Modulus_postprocess\", \"test_Symmetric_preprocess\", \"test_Symmetric_postprocess\", \"test_Strict_preprocess\", \"test_Strict_postprocess\", \"test_Auto_preprocess\", \"test_Auto_postprocess\", \"test_Frac_preprocess\", \"test_Frac_postprocess\", \"test_Formal_preprocess\", \"test_Formal_postprocess\", \"test_Polys_preprocess\", \"test_Polys_postprocess\", \"test_Include_preprocess\", \"test_Include_postprocess\", \"test_All_preprocess\", \"test_All_postprocess\", \"test_Gen_postprocess\", \"test_Symbols_preprocess\", \"test_Symbols_postprocess\", \"test_Method_preprocess\"]", + "expected_spans": { + "sympy/polys/polyoptions.py": [ + "Domain", + "Domain.preprocess" + ] + }, + "test_file_spans": { + "sympy/polys/tests/test_polyoptions.py": [ + "imports", + "test_Domain_preprocess" + ] + }, + "resolved_by": [ + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/polys/polyoptions.py": [ + "Domain.preprocess" + ] + }, + "alternative_spans": { + 
"sympy/polys/polyoptions.py": [ + "Domain.preprocess" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/polys/polyoptions.py": [ + "Domain", + "Domain.preprocess" + ] + }, + "alternative_spans": { + "sympy/polys/polyoptions.py": [ + "Domain", + "Domain.preprocess" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sympy/polys/polyoptions.py": [ + "Domain", + "Domain.preprocess" + ] + }, + "alternative_spans": { + "sympy/polys/polyoptions.py": [ + "Domain", + "Domain.preprocess" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sympy/polys/polyoptions.py": [ + "Domain.preprocess" + ] + } + } + ] + }, + { + "instance_id": "scikit-learn__scikit-learn-10949", + "repo": "scikit-learn/scikit-learn", + "base_commit": "3b5abf76597ce6aff76192869f92647c1b5259e7", + "problem_statement": "warn_on_dtype with DataFrame\n#### Description\r\n\r\n``warn_on_dtype`` has no effect when input is a pandas ``DataFrame``\r\n\r\n#### Steps/Code to Reproduce\r\n```python\r\nfrom sklearn.utils.validation import check_array\r\nimport pandas as pd\r\ndf = pd.DataFrame([[1, 2, 3], [2, 3, 4]], dtype=object)\r\nchecked = check_array(df, warn_on_dtype=True)\r\n```\r\n\r\n#### Expected result: \r\n\r\n```python-traceback\r\nDataConversionWarning: Data with input dtype object was converted to float64.\r\n```\r\n\r\n#### Actual Results\r\nNo warning is thrown\r\n\r\n#### Versions\r\nLinux-4.4.0-116-generic-x86_64-with-debian-stretch-sid\r\nPython 3.6.3 |Anaconda, Inc.| (default, Nov 3 2017, 19:19:16) \r\n[GCC 7.2.0]\r\nNumPy 1.13.1\r\nSciPy 0.19.1\r\nScikit-Learn 0.20.dev0\r\nPandas 0.21.0\r\n\nwarn_on_dtype with DataFrame\n#### Description\r\n\r\n``warn_on_dtype`` has no effect when input is a pandas ``DataFrame``\r\n\r\n#### Steps/Code to Reproduce\r\n```python\r\nfrom sklearn.utils.validation import check_array\r\nimport pandas as pd\r\ndf = pd.DataFrame([[1, 2, 3], [2, 3, 4]], dtype=object)\r\nchecked = check_array(df, warn_on_dtype=True)\r\n```\r\n\r\n#### Expected result: \r\n\r\n```python-traceback\r\nDataConversionWarning: Data with input dtype object was converted to float64.\r\n```\r\n\r\n#### Actual Results\r\nNo warning is thrown\r\n\r\n#### Versions\r\nLinux-4.4.0-116-generic-x86_64-with-debian-stretch-sid\r\nPython 3.6.3 |Anaconda, Inc.| (default, Nov 3 2017, 19:19:16) \r\n[GCC 7.2.0]\r\nNumPy 1.13.1\r\nSciPy 0.19.1\r\nScikit-Learn 0.20.dev0\r\nPandas 0.21.0\r\n\n", + "golden_patch": "diff --git a/sklearn/utils/validation.py b/sklearn/utils/validation.py\n--- a/sklearn/utils/validation.py\n+++ b/sklearn/utils/validation.py\n@@ -466,6 +466,12 @@ def check_array(array, accept_sparse=False, accept_large_sparse=True,\n # not a data type (e.g. a column named dtype in a pandas DataFrame)\n dtype_orig = None\n \n+ # check if the object contains several dtypes (typically a pandas\n+ # DataFrame), and store them. 
If not, store None.\n+ dtypes_orig = None\n+ if hasattr(array, \"dtypes\") and hasattr(array, \"__array__\"):\n+ dtypes_orig = np.array(array.dtypes)\n+\n if dtype_numeric:\n if dtype_orig is not None and dtype_orig.kind == \"O\":\n # if input is object, convert to float.\n@@ -581,6 +587,16 @@ def check_array(array, accept_sparse=False, accept_large_sparse=True,\n if copy and np.may_share_memory(array, array_orig):\n array = np.array(array, dtype=dtype, order=order)\n \n+ if (warn_on_dtype and dtypes_orig is not None and\n+ {array.dtype} != set(dtypes_orig)):\n+ # if there was at the beginning some other types than the final one\n+ # (for instance in a DataFrame that can contain several dtypes) then\n+ # some data must have been converted\n+ msg = (\"Data with input dtype %s were all converted to %s%s.\"\n+ % (', '.join(map(str, sorted(set(dtypes_orig)))), array.dtype,\n+ context))\n+ warnings.warn(msg, DataConversionWarning, stacklevel=3)\n+\n return array\n \n \n", + "test_patch": "diff --git a/sklearn/utils/tests/test_validation.py b/sklearn/utils/tests/test_validation.py\n--- a/sklearn/utils/tests/test_validation.py\n+++ b/sklearn/utils/tests/test_validation.py\n@@ -7,6 +7,7 @@\n from itertools import product\n \n import pytest\n+from pytest import importorskip\n import numpy as np\n import scipy.sparse as sp\n from scipy import __version__ as scipy_version\n@@ -713,6 +714,38 @@ def test_suppress_validation():\n assert_raises(ValueError, assert_all_finite, X)\n \n \n+def test_check_dataframe_warns_on_dtype():\n+ # Check that warn_on_dtype also works for DataFrames.\n+ # https://github.com/scikit-learn/scikit-learn/issues/10948\n+ pd = importorskip(\"pandas\")\n+\n+ df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], dtype=object)\n+ assert_warns_message(DataConversionWarning,\n+ \"Data with input dtype object were all converted to \"\n+ \"float64.\",\n+ check_array, df, dtype=np.float64, warn_on_dtype=True)\n+ assert_warns(DataConversionWarning, check_array, df,\n+ dtype='numeric', warn_on_dtype=True)\n+ assert_no_warnings(check_array, df, dtype='object', warn_on_dtype=True)\n+\n+ # Also check that it raises a warning for mixed dtypes in a DataFrame.\n+ df_mixed = pd.DataFrame([['1', 2, 3], ['4', 5, 6]])\n+ assert_warns(DataConversionWarning, check_array, df_mixed,\n+ dtype=np.float64, warn_on_dtype=True)\n+ assert_warns(DataConversionWarning, check_array, df_mixed,\n+ dtype='numeric', warn_on_dtype=True)\n+ assert_warns(DataConversionWarning, check_array, df_mixed,\n+ dtype=object, warn_on_dtype=True)\n+\n+ # Even with numerical dtypes, a conversion can be made because dtypes are\n+ # uniformized throughout the array.\n+ df_mixed_numeric = pd.DataFrame([[1., 2, 3], [4., 5, 6]])\n+ assert_warns(DataConversionWarning, check_array, df_mixed_numeric,\n+ dtype='numeric', warn_on_dtype=True)\n+ assert_no_warnings(check_array, df_mixed_numeric.astype(int),\n+ dtype='numeric', warn_on_dtype=True)\n+\n+\n class DummyMemory(object):\n def cache(self, func):\n return func\n", + "fail_to_pass": "[\"sklearn/utils/tests/test_validation.py::test_check_dataframe_warns_on_dtype\"]", + "pass_to_pass": "[\"sklearn/utils/tests/test_validation.py::test_as_float_array\", \"sklearn/utils/tests/test_validation.py::test_as_float_array_nan[X0]\", \"sklearn/utils/tests/test_validation.py::test_as_float_array_nan[X1]\", \"sklearn/utils/tests/test_validation.py::test_np_matrix\", \"sklearn/utils/tests/test_validation.py::test_memmap\", \"sklearn/utils/tests/test_validation.py::test_ordering\", 
\"sklearn/utils/tests/test_validation.py::test_check_array_force_all_finite_valid[asarray-inf-False]\", \"sklearn/utils/tests/test_validation.py::test_check_array_force_all_finite_valid[asarray-nan-allow-nan]\", \"sklearn/utils/tests/test_validation.py::test_check_array_force_all_finite_valid[asarray-nan-False]\", \"sklearn/utils/tests/test_validation.py::test_check_array_force_all_finite_valid[csr_matrix-inf-False]\", \"sklearn/utils/tests/test_validation.py::test_check_array_force_all_finite_valid[csr_matrix-nan-allow-nan]\", \"sklearn/utils/tests/test_validation.py::test_check_array_force_all_finite_valid[csr_matrix-nan-False]\", \"sklearn/utils/tests/test_validation.py::test_check_array\", \"sklearn/utils/tests/test_validation.py::test_check_array_pandas_dtype_object_conversion\", \"sklearn/utils/tests/test_validation.py::test_check_array_on_mock_dataframe\", \"sklearn/utils/tests/test_validation.py::test_check_array_dtype_stability\", \"sklearn/utils/tests/test_validation.py::test_check_array_dtype_warning\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_sparse_type_exception\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_sparse_no_exception\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_large_sparse_no_exception[csr]\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_large_sparse_no_exception[csc]\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_large_sparse_no_exception[coo]\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_large_sparse_no_exception[bsr]\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_large_sparse_raise_exception[csr]\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_large_sparse_raise_exception[csc]\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_large_sparse_raise_exception[coo]\", \"sklearn/utils/tests/test_validation.py::test_check_array_accept_large_sparse_raise_exception[bsr]\", \"sklearn/utils/tests/test_validation.py::test_check_array_large_indices_non_supported_scipy_version[csr]\", \"sklearn/utils/tests/test_validation.py::test_check_array_large_indices_non_supported_scipy_version[csc]\", \"sklearn/utils/tests/test_validation.py::test_check_array_large_indices_non_supported_scipy_version[coo]\", \"sklearn/utils/tests/test_validation.py::test_check_array_large_indices_non_supported_scipy_version[bsr]\", \"sklearn/utils/tests/test_validation.py::test_check_array_min_samples_and_features_messages\", \"sklearn/utils/tests/test_validation.py::test_check_array_complex_data_error\", \"sklearn/utils/tests/test_validation.py::test_has_fit_parameter\", \"sklearn/utils/tests/test_validation.py::test_check_symmetric\", \"sklearn/utils/tests/test_validation.py::test_check_is_fitted\", \"sklearn/utils/tests/test_validation.py::test_check_consistent_length\", \"sklearn/utils/tests/test_validation.py::test_check_dataframe_fit_attribute\", \"sklearn/utils/tests/test_validation.py::test_suppress_validation\", \"sklearn/utils/tests/test_validation.py::test_check_memory\", \"sklearn/utils/tests/test_validation.py::test_check_array_memmap[True]\", \"sklearn/utils/tests/test_validation.py::test_check_array_memmap[False]\"]", + "expected_spans": { + "sklearn/utils/validation.py": [ + "check_array" + ] + }, + "test_file_spans": { + "sklearn/utils/tests/test_validation.py": [ + "imports", + "DummyMemory" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": 
"scikit-learn__scikit-learn-11040", + "repo": "scikit-learn/scikit-learn", + "base_commit": "96a02f3934952d486589dddd3f00b40d5a5ab5f2", + "problem_statement": "Missing parameter validation in Neighbors estimator for float n_neighbors\n```python\r\nfrom sklearn.neighbors import NearestNeighbors\r\nfrom sklearn.datasets import make_blobs\r\nX, y = make_blobs()\r\nneighbors = NearestNeighbors(n_neighbors=3.)\r\nneighbors.fit(X)\r\nneighbors.kneighbors(X)\r\n```\r\n```\r\n~/checkout/scikit-learn/sklearn/neighbors/binary_tree.pxi in sklearn.neighbors.kd_tree.NeighborsHeap.__init__()\r\n\r\nTypeError: 'float' object cannot be interpreted as an integer\r\n```\r\nThis should be caught earlier and a more helpful error message should be raised (or we could be lenient and cast to integer, but I think a better error might be better).\r\n\r\nWe need to make sure that \r\n```python\r\nneighbors.kneighbors(X, n_neighbors=3.)\r\n```\r\nalso works.\n", + "golden_patch": "diff --git a/sklearn/neighbors/base.py b/sklearn/neighbors/base.py\n--- a/sklearn/neighbors/base.py\n+++ b/sklearn/neighbors/base.py\n@@ -258,6 +258,12 @@ def _fit(self, X):\n \"Expected n_neighbors > 0. Got %d\" %\n self.n_neighbors\n )\n+ else:\n+ if not np.issubdtype(type(self.n_neighbors), np.integer):\n+ raise TypeError(\n+ \"n_neighbors does not take %s value, \"\n+ \"enter integer value\" %\n+ type(self.n_neighbors))\n \n return self\n \n@@ -327,6 +333,17 @@ class from an array representing our data set and ask who's\n \n if n_neighbors is None:\n n_neighbors = self.n_neighbors\n+ elif n_neighbors <= 0:\n+ raise ValueError(\n+ \"Expected n_neighbors > 0. Got %d\" %\n+ n_neighbors\n+ )\n+ else:\n+ if not np.issubdtype(type(n_neighbors), np.integer):\n+ raise TypeError(\n+ \"n_neighbors does not take %s value, \"\n+ \"enter integer value\" %\n+ type(n_neighbors))\n \n if X is not None:\n query_is_train = False\n", + "test_patch": "diff --git a/sklearn/neighbors/tests/test_neighbors.py b/sklearn/neighbors/tests/test_neighbors.py\n--- a/sklearn/neighbors/tests/test_neighbors.py\n+++ b/sklearn/neighbors/tests/test_neighbors.py\n@@ -18,6 +18,7 @@\n from sklearn.utils.testing import assert_greater\n from sklearn.utils.testing import assert_in\n from sklearn.utils.testing import assert_raises\n+from sklearn.utils.testing import assert_raises_regex\n from sklearn.utils.testing import assert_true\n from sklearn.utils.testing import assert_warns\n from sklearn.utils.testing import assert_warns_message\n@@ -108,6 +109,21 @@ def test_unsupervised_inputs():\n assert_array_almost_equal(ind1, ind2)\n \n \n+def test_n_neighbors_datatype():\n+ # Test to check whether n_neighbors is integer\n+ X = [[1, 1], [1, 1], [1, 1]]\n+ expected_msg = \"n_neighbors does not take .*float.* \" \\\n+ \"value, enter integer value\"\n+ msg = \"Expected n_neighbors > 0. 
Got -3\"\n+\n+ neighbors_ = neighbors.NearestNeighbors(n_neighbors=3.)\n+ assert_raises_regex(TypeError, expected_msg, neighbors_.fit, X)\n+ assert_raises_regex(ValueError, msg,\n+ neighbors_.kneighbors, X=X, n_neighbors=-3)\n+ assert_raises_regex(TypeError, expected_msg,\n+ neighbors_.kneighbors, X=X, n_neighbors=3.)\n+\n+\n def test_precomputed(random_state=42):\n \"\"\"Tests unsupervised NearestNeighbors with a distance matrix.\"\"\"\n # Note: smaller samples may result in spurious test success\n", + "fail_to_pass": "[\"sklearn/neighbors/tests/test_neighbors.py::test_n_neighbors_datatype\"]", + "pass_to_pass": "[\"sklearn/neighbors/tests/test_neighbors.py::test_unsupervised_kneighbors\", \"sklearn/neighbors/tests/test_neighbors.py::test_unsupervised_inputs\", \"sklearn/neighbors/tests/test_neighbors.py::test_precomputed\", \"sklearn/neighbors/tests/test_neighbors.py::test_precomputed_cross_validation\", \"sklearn/neighbors/tests/test_neighbors.py::test_unsupervised_radius_neighbors\", \"sklearn/neighbors/tests/test_neighbors.py::test_kneighbors_classifier\", \"sklearn/neighbors/tests/test_neighbors.py::test_kneighbors_classifier_float_labels\", \"sklearn/neighbors/tests/test_neighbors.py::test_kneighbors_classifier_predict_proba\", \"sklearn/neighbors/tests/test_neighbors.py::test_radius_neighbors_classifier\", \"sklearn/neighbors/tests/test_neighbors.py::test_radius_neighbors_classifier_when_no_neighbors\", \"sklearn/neighbors/tests/test_neighbors.py::test_radius_neighbors_classifier_outlier_labeling\", \"sklearn/neighbors/tests/test_neighbors.py::test_radius_neighbors_classifier_zero_distance\", \"sklearn/neighbors/tests/test_neighbors.py::test_neighbors_regressors_zero_distance\", \"sklearn/neighbors/tests/test_neighbors.py::test_radius_neighbors_boundary_handling\", \"sklearn/neighbors/tests/test_neighbors.py::test_RadiusNeighborsClassifier_multioutput\", \"sklearn/neighbors/tests/test_neighbors.py::test_kneighbors_classifier_sparse\", \"sklearn/neighbors/tests/test_neighbors.py::test_KNeighborsClassifier_multioutput\", \"sklearn/neighbors/tests/test_neighbors.py::test_kneighbors_regressor\", \"sklearn/neighbors/tests/test_neighbors.py::test_KNeighborsRegressor_multioutput_uniform_weight\", \"sklearn/neighbors/tests/test_neighbors.py::test_kneighbors_regressor_multioutput\", \"sklearn/neighbors/tests/test_neighbors.py::test_radius_neighbors_regressor\", \"sklearn/neighbors/tests/test_neighbors.py::test_RadiusNeighborsRegressor_multioutput_with_uniform_weight\", \"sklearn/neighbors/tests/test_neighbors.py::test_RadiusNeighborsRegressor_multioutput\", \"sklearn/neighbors/tests/test_neighbors.py::test_kneighbors_regressor_sparse\", \"sklearn/neighbors/tests/test_neighbors.py::test_neighbors_iris\", \"sklearn/neighbors/tests/test_neighbors.py::test_neighbors_digits\", \"sklearn/neighbors/tests/test_neighbors.py::test_kneighbors_graph\", \"sklearn/neighbors/tests/test_neighbors.py::test_kneighbors_graph_sparse\", \"sklearn/neighbors/tests/test_neighbors.py::test_radius_neighbors_graph\", \"sklearn/neighbors/tests/test_neighbors.py::test_radius_neighbors_graph_sparse\", \"sklearn/neighbors/tests/test_neighbors.py::test_neighbors_badargs\", \"sklearn/neighbors/tests/test_neighbors.py::test_neighbors_metrics\", \"sklearn/neighbors/tests/test_neighbors.py::test_callable_metric\", \"sklearn/neighbors/tests/test_neighbors.py::test_valid_brute_metric_for_auto_algorithm\", \"sklearn/neighbors/tests/test_neighbors.py::test_metric_params_interface\", 
\"sklearn/neighbors/tests/test_neighbors.py::test_predict_sparse_ball_kd_tree\", \"sklearn/neighbors/tests/test_neighbors.py::test_non_euclidean_kneighbors\", \"sklearn/neighbors/tests/test_neighbors.py::test_k_and_radius_neighbors_train_is_not_query\", \"sklearn/neighbors/tests/test_neighbors.py::test_k_and_radius_neighbors_X_None\", \"sklearn/neighbors/tests/test_neighbors.py::test_k_and_radius_neighbors_duplicates\", \"sklearn/neighbors/tests/test_neighbors.py::test_include_self_neighbors_graph\", \"sklearn/neighbors/tests/test_neighbors.py::test_dtype_convert\", \"sklearn/neighbors/tests/test_neighbors.py::test_sparse_metric_callable\", \"sklearn/neighbors/tests/test_neighbors.py::test_pairwise_boolean_distance\"]", + "expected_spans": { + "sklearn/neighbors/base.py": [ + "NeighborsBase._fit", + "KNeighborsMixin.kneighbors" + ] + }, + "test_file_spans": { + "sklearn/neighbors/tests/test_neighbors.py": [ + "imports", + "test_precomputed" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-14774", + "repo": "sympy/sympy", + "base_commit": "8fc63c2d71752389a44367b8ef4aba8a91af6a45", + "problem_statement": "Latex printer does not support full inverse trig function names for acsc and asec\nFor example\r\n`latex(asin(x), inv_trig_style=\"full\")` works as expected returning `'\\\\arcsin{\\\\left (x \\\\right )}'`\r\nBut `latex(acsc(x), inv_trig_style=\"full\")` gives `'\\\\operatorname{acsc}{\\\\left (x \\\\right )}'` instead of `'\\\\operatorname{arccsc}{\\\\left (x \\\\right )}'`\r\n\r\nA fix seems to be to change line 743 of sympy/printing/latex.py from\r\n`inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]` to\r\n`inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]`\n", + "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -740,7 +740,7 @@ def _print_Function(self, expr, exp=None):\n len(args) == 1 and \\\n not self._needs_function_brackets(expr.args[0])\n \n- inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acot\"]\n+ inv_trig_table = [\"asin\", \"acos\", \"atan\", \"acsc\", \"asec\", \"acot\"]\n \n # If the function is an inverse trig function, handle the style\n if func in inv_trig_table:\n", + "test_patch": "diff --git a/sympy/printing/tests/test_latex.py b/sympy/printing/tests/test_latex.py\n--- a/sympy/printing/tests/test_latex.py\n+++ b/sympy/printing/tests/test_latex.py\n@@ -6,7 +6,7 @@\n Lambda, LaplaceTransform, Limit, Matrix, Max, MellinTransform, Min, Mul,\n Order, Piecewise, Poly, ring, field, ZZ, Pow, Product, Range, Rational,\n RisingFactorial, rootof, RootSum, S, Shi, Si, SineTransform, Subs,\n- Sum, Symbol, ImageSet, Tuple, Union, Ynm, Znm, arg, asin, Mod,\n+ Sum, Symbol, ImageSet, Tuple, Union, Ynm, Znm, arg, asin, acsc, Mod,\n assoc_laguerre, assoc_legendre, beta, binomial, catalan, ceiling, Complement,\n chebyshevt, chebyshevu, conjugate, cot, coth, diff, dirichlet_eta, euler,\n exp, expint, factorial, factorial2, floor, gamma, gegenbauer, hermite,\n@@ -305,6 +305,8 @@ def test_latex_functions():\n assert latex(asin(x**2), inv_trig_style=\"power\",\n fold_func_brackets=True) == \\\n r\"\\sin^{-1} {x^{2}}\"\n+ assert latex(acsc(x), inv_trig_style=\"full\") == \\\n+ r\"\\operatorname{arccsc}{\\left (x \\right )}\"\n \n assert latex(factorial(k)) == r\"k!\"\n assert latex(factorial(-k)) == r\"\\left(- k\\right)!\"\n", + "fail_to_pass": "[\"test_latex_functions\"]", + "pass_to_pass": 
"[\"test_printmethod\", \"test_latex_basic\", \"test_latex_builtins\", \"test_latex_SingularityFunction\", \"test_latex_cycle\", \"test_latex_permutation\", \"test_latex_Float\", \"test_latex_vector_expressions\", \"test_latex_symbols\", \"test_function_subclass_different_name\", \"test_hyper_printing\", \"test_latex_bessel\", \"test_latex_fresnel\", \"test_latex_brackets\", \"test_latex_indexed\", \"test_latex_derivatives\", \"test_latex_subs\", \"test_latex_integrals\", \"test_latex_sets\", \"test_latex_SetExpr\", \"test_latex_Range\", \"test_latex_sequences\", \"test_latex_FourierSeries\", \"test_latex_FormalPowerSeries\", \"test_latex_intervals\", \"test_latex_AccumuBounds\", \"test_latex_emptyset\", \"test_latex_commutator\", \"test_latex_union\", \"test_latex_symmetric_difference\", \"test_latex_Complement\", \"test_latex_Complexes\", \"test_latex_productset\", \"test_latex_Naturals\", \"test_latex_Naturals0\", \"test_latex_Integers\", \"test_latex_ImageSet\", \"test_latex_ConditionSet\", \"test_latex_ComplexRegion\", \"test_latex_Contains\", \"test_latex_sum\", \"test_latex_product\", \"test_latex_limits\", \"test_latex_log\", \"test_issue_3568\", \"test_latex\", \"test_latex_dict\", \"test_latex_list\", \"test_latex_rational\", \"test_latex_inverse\", \"test_latex_DiracDelta\", \"test_latex_Heaviside\", \"test_latex_KroneckerDelta\", \"test_latex_LeviCivita\", \"test_mode\", \"test_latex_Piecewise\", \"test_latex_Matrix\", \"test_latex_matrix_with_functions\", \"test_latex_NDimArray\", \"test_latex_mul_symbol\", \"test_latex_issue_4381\", \"test_latex_issue_4576\", \"test_latex_pow_fraction\", \"test_noncommutative\", \"test_latex_order\", \"test_latex_Lambda\", \"test_latex_PolyElement\", \"test_latex_FracElement\", \"test_latex_Poly\", \"test_latex_Poly_order\", \"test_latex_ComplexRootOf\", \"test_latex_RootSum\", \"test_settings\", \"test_latex_numbers\", \"test_latex_euler\", \"test_lamda\", \"test_custom_symbol_names\", \"test_matAdd\", \"test_matMul\", \"test_latex_MatrixSlice\", \"test_latex_RandomDomain\", \"test_PrettyPoly\", \"test_integral_transforms\", \"test_categories\", \"test_Modules\", \"test_QuotientRing\", \"test_Tr\", \"test_Adjoint\", \"test_Hadamard\", \"test_ZeroMatrix\", \"test_boolean_args_order\", \"test_imaginary\", \"test_builtins_without_args\", \"test_latex_greek_functions\", \"test_translate\", \"test_other_symbols\", \"test_modifiers\", \"test_greek_symbols\", \"test_builtin_no_args\", \"test_issue_6853\", \"test_Mul\", \"test_Pow\", \"test_issue_7180\", \"test_issue_8409\", \"test_issue_7117\", \"test_issue_2934\", \"test_issue_10489\", \"test_issue_12886\", \"test_issue_13651\", \"test_latex_UnevaluatedExpr\", \"test_MatrixElement_printing\", \"test_MatrixSymbol_printing\", \"test_Quaternion_latex_printing\", \"test_TensorProduct_printing\"]", + "expected_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_latex.py": [ + "imports", + "test_latex_functions" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sympy/parsing/sympy_parser.py": [ + "EvaluateFalseTransformer.visit_BinOp" + ], + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + 
"sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/printing/latex.py": [ + "docstring", + "imports", + "impl:5", + "impl:11", + "LatexPrinter", + "LatexPrinter.__init__", + "LatexPrinter.parenthesize", + "LatexPrinter.doprint", + "LatexPrinter._needs_brackets", + "LatexPrinter._needs_function_brackets", + "LatexPrinter._needs_mul_brackets", + "LatexPrinter._needs_add_brackets", + "LatexPrinter._do_exponent", + "LatexPrinter._print_bool", + "LatexPrinter:6", + "LatexPrinter._print_NoneType", + "LatexPrinter._print_Add", + "LatexPrinter._print_Cycle", + "LatexPrinter:10", + "LatexPrinter._print_Float", + "LatexPrinter._print_Cross", + "LatexPrinter._print_Curl", + "LatexPrinter._print_Divergence", + "LatexPrinter._print_Dot", + "LatexPrinter._print_Gradient", + "LatexPrinter._print_Mul", + "LatexPrinter._print_Pow", + "LatexPrinter._print_UnevaluatedExpr", + "LatexPrinter._print_Sum", + "LatexPrinter._print_Product", + "LatexPrinter._print_BasisDependent", + "LatexPrinter._print_Indexed", + "LatexPrinter._print_Derivative", + "LatexPrinter._print_Subs", + "LatexPrinter._print_Integral", + "LatexPrinter._print_Limit", + "LatexPrinter._hprint_Function", + "LatexPrinter._print_Function", + "LatexPrinter._special_function_classes", + "LatexPrinter._print_Lambda", + "LatexPrinter._print_Min", + "LatexPrinter._print_Max", + "LatexPrinter._print_floor", + "LatexPrinter._print_ceiling", + "LatexPrinter._print_log", + "LatexPrinter._print_Abs", + "LatexPrinter:12", + "LatexPrinter._print_re", + "LatexPrinter._print_im", + "LatexPrinter._print_Not", + "LatexPrinter._print_LogOp", + "LatexPrinter._print_And", + "LatexPrinter._print_Or", + "LatexPrinter._print_Xor", + 
"LatexPrinter._print_Implies", + "LatexPrinter._print_Equivalent", + "LatexPrinter._print_conjugate", + "LatexPrinter._print_polar_lift", + "LatexPrinter._print_ExpBase", + "LatexPrinter._print_elliptic_k", + "LatexPrinter._print_elliptic_f", + "LatexPrinter._print_elliptic_e", + "LatexPrinter._print_elliptic_pi", + "LatexPrinter._print_beta", + "LatexPrinter._print_gamma", + "LatexPrinter._print_uppergamma", + "LatexPrinter._print_lowergamma", + "LatexPrinter._print_Chi", + "LatexPrinter._print_expint", + "LatexPrinter._print_fresnels", + "LatexPrinter._print_fresnelc", + "LatexPrinter._print_subfactorial", + "LatexPrinter._print_factorial", + "LatexPrinter._print_factorial2", + "LatexPrinter._print_binomial", + "LatexPrinter._print_RisingFactorial", + "LatexPrinter._print_FallingFactorial", + "LatexPrinter._hprint_BesselBase", + "LatexPrinter._hprint_vec", + "LatexPrinter._hprint_airy", + "LatexPrinter._hprint_airy_prime", + "LatexPrinter._print_hyper", + "LatexPrinter._print_meijerg", + "LatexPrinter._print_dirichlet_eta", + "LatexPrinter._print_zeta", + "LatexPrinter._print_lerchphi", + "LatexPrinter._print_polylog", + "LatexPrinter._print_jacobi", + "LatexPrinter._print_gegenbauer", + "LatexPrinter._print_chebyshevt", + "LatexPrinter._print_chebyshevu", + "LatexPrinter._print_legendre", + "LatexPrinter._print_assoc_legendre", + "LatexPrinter._print_hermite", + "LatexPrinter._print_laguerre", + "LatexPrinter._print_assoc_laguerre", + "LatexPrinter._print_Ynm", + "LatexPrinter._print_Znm", + "LatexPrinter._print_Rational", + "LatexPrinter._print_Order", + "LatexPrinter._print_Symbol", + "LatexPrinter:14", + "LatexPrinter._deal_with_super_sub", + "LatexPrinter._print_Relational", + "LatexPrinter._print_Piecewise", + "LatexPrinter._print_MatrixBase", + "LatexPrinter:18", + "LatexPrinter._print_MatrixElement", + "LatexPrinter._print_MatrixSlice", + "LatexPrinter._print_Transpose", + "LatexPrinter._print_Adjoint", + "LatexPrinter._print_MatAdd", + "LatexPrinter._print_MatMul", + "LatexPrinter._print_Mod", + "LatexPrinter._print_HadamardProduct", + "LatexPrinter._print_KroneckerProduct", + "LatexPrinter._print_MatPow", + "LatexPrinter._print_ZeroMatrix", + "LatexPrinter._print_Identity", + "LatexPrinter._print_NDimArray", + "LatexPrinter:22", + "LatexPrinter._print_tuple", + "LatexPrinter._print_TensorProduct", + "LatexPrinter._print_WedgeProduct", + "LatexPrinter._print_Tuple", + "LatexPrinter._print_list", + "LatexPrinter._print_dict", + "LatexPrinter._print_Dict", + "LatexPrinter._print_DiracDelta", + "LatexPrinter._print_SingularityFunction", + "LatexPrinter._print_Heaviside", + "LatexPrinter._print_KroneckerDelta", + "LatexPrinter._print_LeviCivita", + "LatexPrinter._print_ProductSet", + "LatexPrinter._print_RandomDomain", + "LatexPrinter._print_set", + "LatexPrinter:30", + "LatexPrinter._print_Range", + "LatexPrinter._print_SeqFormula", + "LatexPrinter:32", + "LatexPrinter._print_Interval", + "LatexPrinter._print_AccumulationBounds", + "LatexPrinter._print_Union", + "LatexPrinter._print_Complement", + "LatexPrinter._print_Intersection", + "LatexPrinter._print_SymmetricDifference", + "LatexPrinter._print_EmptySet", + "LatexPrinter._print_Naturals", + "LatexPrinter._print_Naturals0", + "LatexPrinter._print_Integers", + "LatexPrinter._print_Reals", + "LatexPrinter._print_Complexes", + "LatexPrinter._print_ImageSet", + "LatexPrinter._print_ConditionSet", + "LatexPrinter._print_ComplexRegion", + "LatexPrinter._print_Contains", + "LatexPrinter._print_FourierSeries", + 
"LatexPrinter._print_FormalPowerSeries", + "LatexPrinter._print_FiniteField", + "LatexPrinter._print_IntegerRing", + "LatexPrinter._print_RationalField", + "LatexPrinter._print_RealField", + "LatexPrinter._print_ComplexField", + "LatexPrinter._print_PolynomialRing", + "LatexPrinter._print_FractionField", + "LatexPrinter._print_PolynomialRingBase", + "LatexPrinter._print_Poly", + "LatexPrinter._print_ComplexRootOf", + "LatexPrinter._print_RootSum", + "LatexPrinter._print_PolyElement", + "LatexPrinter._print_FracElement", + "LatexPrinter._print_euler", + "LatexPrinter._print_catalan", + "LatexPrinter._print_MellinTransform", + "LatexPrinter._print_InverseMellinTransform", + "LatexPrinter._print_LaplaceTransform", + "LatexPrinter._print_InverseLaplaceTransform", + "LatexPrinter._print_FourierTransform", + "LatexPrinter._print_InverseFourierTransform", + "LatexPrinter._print_SineTransform", + "LatexPrinter._print_InverseSineTransform", + "LatexPrinter._print_CosineTransform", + "LatexPrinter._print_InverseCosineTransform", + "LatexPrinter._print_DMP", + "LatexPrinter._print_Morphism", + "LatexPrinter._print_NamedMorphism", + "LatexPrinter._print_IdentityMorphism", + "LatexPrinter._print_CompositeMorphism", + "LatexPrinter._print_Category", + "LatexPrinter._print_Diagram", + "LatexPrinter._print_DiagramGrid", + "LatexPrinter._print_FreeModule", + "LatexPrinter._print_FreeModuleElement", + "LatexPrinter._print_SubModule", + "LatexPrinter._print_ModuleImplementedIdeal", + "LatexPrinter._print_Quaternion", + "LatexPrinter._print_QuotientRing", + "LatexPrinter._print_QuotientRingElement", + "LatexPrinter._print_QuotientModuleElement", + "LatexPrinter._print_QuotientModule", + "LatexPrinter._print_MatrixHomomorphism", + "LatexPrinter._print_BaseScalarField", + "LatexPrinter._print_BaseVectorField", + "LatexPrinter._print_Differential", + "LatexPrinter._print_Tr", + "LatexPrinter._print_totient", + "LatexPrinter._print_reduced_totient", + "LatexPrinter._print_divisor_sigma", + "LatexPrinter._print_udivisor_sigma", + "LatexPrinter._print_primenu", + "LatexPrinter._print_primeomega", + "translate", + "latex" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "docstring", + "imports", + "impl:5", + "impl:11", + "LatexPrinter", + "LatexPrinter.__init__", + "LatexPrinter.parenthesize", + "LatexPrinter.doprint", + "LatexPrinter._needs_brackets", + "LatexPrinter._needs_function_brackets", + "LatexPrinter._needs_mul_brackets", + "LatexPrinter._needs_add_brackets", + "LatexPrinter._do_exponent", + "LatexPrinter._print_bool", + "LatexPrinter:6", + "LatexPrinter._print_NoneType", + "LatexPrinter._print_Add", + "LatexPrinter._print_Cycle", + "LatexPrinter:10", + "LatexPrinter._print_Float", + "LatexPrinter._print_Cross", + "LatexPrinter._print_Curl", + "LatexPrinter._print_Divergence", + "LatexPrinter._print_Dot", + "LatexPrinter._print_Gradient", + "LatexPrinter._print_Mul", + "LatexPrinter._print_Pow", + "LatexPrinter._print_UnevaluatedExpr", + "LatexPrinter._print_Sum", + "LatexPrinter._print_Product", + "LatexPrinter._print_BasisDependent", + "LatexPrinter._print_Indexed", + "LatexPrinter._print_Derivative", + "LatexPrinter._print_Subs", + "LatexPrinter._print_Integral", + "LatexPrinter._print_Limit", + "LatexPrinter._hprint_Function", + "LatexPrinter._print_Function", + "LatexPrinter._special_function_classes", + "LatexPrinter._print_Lambda", + "LatexPrinter._print_Min", + "LatexPrinter._print_Max", + "LatexPrinter._print_floor", + "LatexPrinter._print_ceiling", + "LatexPrinter._print_log", 
+ "LatexPrinter._print_Abs", + "LatexPrinter:12", + "LatexPrinter._print_re", + "LatexPrinter._print_im", + "LatexPrinter._print_Not", + "LatexPrinter._print_LogOp", + "LatexPrinter._print_And", + "LatexPrinter._print_Or", + "LatexPrinter._print_Xor", + "LatexPrinter._print_Implies", + "LatexPrinter._print_Equivalent", + "LatexPrinter._print_conjugate", + "LatexPrinter._print_polar_lift", + "LatexPrinter._print_ExpBase", + "LatexPrinter._print_elliptic_k", + "LatexPrinter._print_elliptic_f", + "LatexPrinter._print_elliptic_e", + "LatexPrinter._print_elliptic_pi", + "LatexPrinter._print_beta", + "LatexPrinter._print_gamma", + "LatexPrinter._print_uppergamma", + "LatexPrinter._print_lowergamma", + "LatexPrinter._print_Chi", + "LatexPrinter._print_expint", + "LatexPrinter._print_fresnels", + "LatexPrinter._print_fresnelc", + "LatexPrinter._print_subfactorial", + "LatexPrinter._print_factorial", + "LatexPrinter._print_factorial2", + "LatexPrinter._print_binomial", + "LatexPrinter._print_RisingFactorial", + "LatexPrinter._print_FallingFactorial", + "LatexPrinter._hprint_BesselBase", + "LatexPrinter._hprint_vec", + "LatexPrinter._hprint_airy", + "LatexPrinter._hprint_airy_prime", + "LatexPrinter._print_hyper", + "LatexPrinter._print_meijerg", + "LatexPrinter._print_dirichlet_eta", + "LatexPrinter._print_zeta", + "LatexPrinter._print_lerchphi", + "LatexPrinter._print_polylog", + "LatexPrinter._print_jacobi", + "LatexPrinter._print_gegenbauer", + "LatexPrinter._print_chebyshevt", + "LatexPrinter._print_chebyshevu", + "LatexPrinter._print_legendre", + "LatexPrinter._print_assoc_legendre", + "LatexPrinter._print_hermite", + "LatexPrinter._print_laguerre", + "LatexPrinter._print_assoc_laguerre", + "LatexPrinter._print_Ynm", + "LatexPrinter._print_Znm", + "LatexPrinter._print_Rational", + "LatexPrinter._print_Order", + "LatexPrinter._print_Symbol", + "LatexPrinter:14", + "LatexPrinter._deal_with_super_sub", + "LatexPrinter._print_Relational", + "LatexPrinter._print_Piecewise", + "LatexPrinter._print_MatrixBase", + "LatexPrinter:18", + "LatexPrinter._print_MatrixElement", + "LatexPrinter._print_MatrixSlice", + "LatexPrinter._print_Transpose", + "LatexPrinter._print_Adjoint", + "LatexPrinter._print_MatAdd", + "LatexPrinter._print_MatMul", + "LatexPrinter._print_Mod", + "LatexPrinter._print_HadamardProduct", + "LatexPrinter._print_KroneckerProduct", + "LatexPrinter._print_MatPow", + "LatexPrinter._print_ZeroMatrix", + "LatexPrinter._print_Identity", + "LatexPrinter._print_NDimArray", + "LatexPrinter:22", + "LatexPrinter._print_tuple", + "LatexPrinter._print_TensorProduct", + "LatexPrinter._print_WedgeProduct", + "LatexPrinter._print_Tuple", + "LatexPrinter._print_list", + "LatexPrinter._print_dict", + "LatexPrinter._print_Dict", + "LatexPrinter._print_DiracDelta", + "LatexPrinter._print_SingularityFunction", + "LatexPrinter._print_Heaviside", + "LatexPrinter._print_KroneckerDelta", + "LatexPrinter._print_LeviCivita", + "LatexPrinter._print_ProductSet", + "LatexPrinter._print_RandomDomain", + "LatexPrinter._print_set", + "LatexPrinter:30", + "LatexPrinter._print_Range", + "LatexPrinter._print_SeqFormula", + "LatexPrinter:32", + "LatexPrinter._print_Interval", + "LatexPrinter._print_AccumulationBounds", + "LatexPrinter._print_Union", + "LatexPrinter._print_Complement", + "LatexPrinter._print_Intersection", + "LatexPrinter._print_SymmetricDifference", + "LatexPrinter._print_EmptySet", + "LatexPrinter._print_Naturals", + "LatexPrinter._print_Naturals0", + "LatexPrinter._print_Integers", + 
"LatexPrinter._print_Reals", + "LatexPrinter._print_Complexes", + "LatexPrinter._print_ImageSet", + "LatexPrinter._print_ConditionSet", + "LatexPrinter._print_ComplexRegion", + "LatexPrinter._print_Contains", + "LatexPrinter._print_FourierSeries", + "LatexPrinter._print_FormalPowerSeries", + "LatexPrinter._print_FiniteField", + "LatexPrinter._print_IntegerRing", + "LatexPrinter._print_RationalField", + "LatexPrinter._print_RealField", + "LatexPrinter._print_ComplexField", + "LatexPrinter._print_PolynomialRing", + "LatexPrinter._print_FractionField", + "LatexPrinter._print_PolynomialRingBase", + "LatexPrinter._print_Poly", + "LatexPrinter._print_ComplexRootOf", + "LatexPrinter._print_RootSum", + "LatexPrinter._print_PolyElement", + "LatexPrinter._print_FracElement", + "LatexPrinter._print_euler", + "LatexPrinter._print_catalan", + "LatexPrinter._print_MellinTransform", + "LatexPrinter._print_InverseMellinTransform", + "LatexPrinter._print_LaplaceTransform", + "LatexPrinter._print_InverseLaplaceTransform", + "LatexPrinter._print_FourierTransform", + "LatexPrinter._print_InverseFourierTransform", + "LatexPrinter._print_SineTransform", + "LatexPrinter._print_InverseSineTransform", + "LatexPrinter._print_CosineTransform", + "LatexPrinter._print_InverseCosineTransform", + "LatexPrinter._print_DMP", + "LatexPrinter._print_Morphism", + "LatexPrinter._print_NamedMorphism", + "LatexPrinter._print_IdentityMorphism", + "LatexPrinter._print_CompositeMorphism", + "LatexPrinter._print_Category", + "LatexPrinter._print_Diagram", + "LatexPrinter._print_DiagramGrid", + "LatexPrinter._print_FreeModule", + "LatexPrinter._print_FreeModuleElement", + "LatexPrinter._print_SubModule", + "LatexPrinter._print_ModuleImplementedIdeal", + "LatexPrinter._print_Quaternion", + "LatexPrinter._print_QuotientRing", + "LatexPrinter._print_QuotientRingElement", + "LatexPrinter._print_QuotientModuleElement", + "LatexPrinter._print_QuotientModule", + "LatexPrinter._print_MatrixHomomorphism", + "LatexPrinter._print_BaseScalarField", + "LatexPrinter._print_BaseVectorField", + "LatexPrinter._print_Differential", + "LatexPrinter._print_Tr", + "LatexPrinter._print_totient", + "LatexPrinter._print_reduced_totient", + "LatexPrinter._print_divisor_sigma", + "LatexPrinter._print_udivisor_sigma", + "LatexPrinter._print_primenu", + "LatexPrinter._print_primeomega", + "translate", + "latex" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } 
+ }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + 
"alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ], + "sympy/printing/tests/test_latex.py": [ + "test_latex_functions" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_Function" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-11281", + "repo": "scikit-learn/scikit-learn", + "base_commit": "4143356c3c51831300789e4fdf795d83716dbab6", + "problem_statement": "Should mixture models have a clusterer-compatible interface\nMixture models are currently a bit different. They are basically clusterers, except they are probabilistic, and are applied to inductive problems unlike many clusterers. But they are unlike clusterers in API:\r\n* they have an `n_components` parameter, with identical purpose to `n_clusters`\r\n* they do not store the `labels_` of the training data\r\n* they do not have a `fit_predict` method\r\n\r\nAnd they are almost entirely documented separately.\r\n\r\nShould we make the MMs more like clusterers?\n", + "golden_patch": "diff --git a/sklearn/mixture/base.py b/sklearn/mixture/base.py\n--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -172,7 +172,7 @@ def _initialize(self, X, resp):\n def fit(self, X, y=None):\n \"\"\"Estimate model parameters with the EM algorithm.\n \n- The method fit the model `n_init` times and set the parameters with\n+ The method fits the model `n_init` times and set the parameters with\n which the model has the largest likelihood or lower bound. Within each\n trial, the method iterates between E-step and M-step for `max_iter`\n times until the change of likelihood or lower bound is less than\n@@ -188,6 +188,32 @@ def fit(self, X, y=None):\n -------\n self\n \"\"\"\n+ self.fit_predict(X, y)\n+ return self\n+\n+ def fit_predict(self, X, y=None):\n+ \"\"\"Estimate model parameters using X and predict the labels for X.\n+\n+ The method fits the model n_init times and sets the parameters with\n+ which the model has the largest likelihood or lower bound. Within each\n+ trial, the method iterates between E-step and M-step for `max_iter`\n+ times until the change of likelihood or lower bound is less than\n+ `tol`, otherwise, a `ConvergenceWarning` is raised. After fitting, it\n+ predicts the most probable label for the input data points.\n+\n+ .. versionadded:: 0.20\n+\n+ Parameters\n+ ----------\n+ X : array-like, shape (n_samples, n_features)\n+ List of n_features-dimensional data points. 
Each row\n+ corresponds to a single data point.\n+\n+ Returns\n+ -------\n+ labels : array, shape (n_samples,)\n+ Component labels.\n+ \"\"\"\n X = _check_X(X, self.n_components, ensure_min_samples=2)\n self._check_initial_parameters(X)\n \n@@ -240,7 +266,7 @@ def fit(self, X, y=None):\n self._set_parameters(best_params)\n self.n_iter_ = best_n_iter\n \n- return self\n+ return log_resp.argmax(axis=1)\n \n def _e_step(self, X):\n \"\"\"E step.\n", + "test_patch": "diff --git a/sklearn/mixture/tests/test_bayesian_mixture.py b/sklearn/mixture/tests/test_bayesian_mixture.py\n--- a/sklearn/mixture/tests/test_bayesian_mixture.py\n+++ b/sklearn/mixture/tests/test_bayesian_mixture.py\n@@ -1,12 +1,16 @@\n # Author: Wei Xue \n # Thierry Guillemot \n # License: BSD 3 clause\n+import copy\n \n import numpy as np\n from scipy.special import gammaln\n \n from sklearn.utils.testing import assert_raise_message\n from sklearn.utils.testing import assert_almost_equal\n+from sklearn.utils.testing import assert_array_equal\n+\n+from sklearn.metrics.cluster import adjusted_rand_score\n \n from sklearn.mixture.bayesian_mixture import _log_dirichlet_norm\n from sklearn.mixture.bayesian_mixture import _log_wishart_norm\n@@ -14,7 +18,7 @@\n from sklearn.mixture import BayesianGaussianMixture\n \n from sklearn.mixture.tests.test_gaussian_mixture import RandomData\n-from sklearn.exceptions import ConvergenceWarning\n+from sklearn.exceptions import ConvergenceWarning, NotFittedError\n from sklearn.utils.testing import assert_greater_equal, ignore_warnings\n \n \n@@ -419,3 +423,49 @@ def test_invariant_translation():\n assert_almost_equal(bgmm1.means_, bgmm2.means_ - 100)\n assert_almost_equal(bgmm1.weights_, bgmm2.weights_)\n assert_almost_equal(bgmm1.covariances_, bgmm2.covariances_)\n+\n+\n+def test_bayesian_mixture_fit_predict():\n+ rng = np.random.RandomState(0)\n+ rand_data = RandomData(rng, scale=7)\n+ n_components = 2 * rand_data.n_components\n+\n+ for covar_type in COVARIANCE_TYPE:\n+ bgmm1 = BayesianGaussianMixture(n_components=n_components,\n+ max_iter=100, random_state=rng,\n+ tol=1e-3, reg_covar=0)\n+ bgmm1.covariance_type = covar_type\n+ bgmm2 = copy.deepcopy(bgmm1)\n+ X = rand_data.X[covar_type]\n+\n+ Y_pred1 = bgmm1.fit(X).predict(X)\n+ Y_pred2 = bgmm2.fit_predict(X)\n+ assert_array_equal(Y_pred1, Y_pred2)\n+\n+\n+def test_bayesian_mixture_predict_predict_proba():\n+ # this is the same test as test_gaussian_mixture_predict_predict_proba()\n+ rng = np.random.RandomState(0)\n+ rand_data = RandomData(rng)\n+ for prior_type in PRIOR_TYPE:\n+ for covar_type in COVARIANCE_TYPE:\n+ X = rand_data.X[covar_type]\n+ Y = rand_data.Y\n+ bgmm = BayesianGaussianMixture(\n+ n_components=rand_data.n_components,\n+ random_state=rng,\n+ weight_concentration_prior_type=prior_type,\n+ covariance_type=covar_type)\n+\n+ # Check a warning message arrive if we don't do fit\n+ assert_raise_message(NotFittedError,\n+ \"This BayesianGaussianMixture instance\"\n+ \" is not fitted yet. 
Call 'fit' with \"\n+ \"appropriate arguments before using \"\n+ \"this method.\", bgmm.predict, X)\n+\n+ bgmm.fit(X)\n+ Y_pred = bgmm.predict(X)\n+ Y_pred_proba = bgmm.predict_proba(X).argmax(axis=1)\n+ assert_array_equal(Y_pred, Y_pred_proba)\n+ assert_greater_equal(adjusted_rand_score(Y, Y_pred), .95)\ndiff --git a/sklearn/mixture/tests/test_gaussian_mixture.py b/sklearn/mixture/tests/test_gaussian_mixture.py\n--- a/sklearn/mixture/tests/test_gaussian_mixture.py\n+++ b/sklearn/mixture/tests/test_gaussian_mixture.py\n@@ -3,6 +3,7 @@\n # License: BSD 3 clause\n \n import sys\n+import copy\n import warnings\n \n import numpy as np\n@@ -569,6 +570,26 @@ def test_gaussian_mixture_predict_predict_proba():\n assert_greater(adjusted_rand_score(Y, Y_pred), .95)\n \n \n+def test_gaussian_mixture_fit_predict():\n+ rng = np.random.RandomState(0)\n+ rand_data = RandomData(rng)\n+ for covar_type in COVARIANCE_TYPE:\n+ X = rand_data.X[covar_type]\n+ Y = rand_data.Y\n+ g = GaussianMixture(n_components=rand_data.n_components,\n+ random_state=rng, weights_init=rand_data.weights,\n+ means_init=rand_data.means,\n+ precisions_init=rand_data.precisions[covar_type],\n+ covariance_type=covar_type)\n+\n+ # check if fit_predict(X) is equivalent to fit(X).predict(X)\n+ f = copy.deepcopy(g)\n+ Y_pred1 = f.fit(X).predict(X)\n+ Y_pred2 = g.fit_predict(X)\n+ assert_array_equal(Y_pred1, Y_pred2)\n+ assert_greater(adjusted_rand_score(Y, Y_pred2), .95)\n+\n+\n def test_gaussian_mixture_fit():\n # recover the ground truth\n rng = np.random.RandomState(0)\n", + "fail_to_pass": "[\"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_fit_predict\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_predict\"]", + "pass_to_pass": "[\"sklearn/mixture/tests/test_bayesian_mixture.py::test_log_dirichlet_norm\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_log_wishart_norm\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_covariance_type\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_weight_concentration_prior_type\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_weights_prior_initialisation\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_means_prior_initialisation\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_precisions_prior_initialisation\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_check_is_fitted\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_weights\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_monotonic_likelihood\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_compare_covar_type\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_check_covariance_precision\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_invariant_translation\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_predict_predict_proba\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_attributes\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_check_X\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_check_weights\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_check_means\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_check_precisions\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_suffstat_sk_full\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_suffstat_sk_tied\", 
\"sklearn/mixture/tests/test_gaussian_mixture.py::test_suffstat_sk_diag\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_suffstat_sk_spherical\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_compute_log_det_cholesky\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_log_probabilities\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_estimate_log_prob_resp\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_predict_predict_proba\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_best_params\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_convergence_warning\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_multiple_init\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_n_parameters\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_bic_1d_1component\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_aic_bic\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_verbose\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_warm_start\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_score\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_score_samples\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_monotonic_likelihood\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_regularisation\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_property\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_sample\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_init\"]", + "expected_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit" + ] + }, + "test_file_spans": { + "sklearn/mixture/tests/test_bayesian_mixture.py": [ + "docstring", + "imports" + ], + "sklearn/mixture/tests/test_gaussian_mixture.py": [ + "docstring", + "test_gaussian_mixture_fit" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture._e_step" + ], + "sklearn/mixture/gmm.py": [ + "_GMMBase.__init__", + "_GMMBase.predict", + "_GMMBase.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture._e_step" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture", + "BaseMixture._print_verbose_msg_init_end" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture", + "BaseMixture._print_verbose_msg_init_end" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit" + ], + "sklearn/mixture/bayesian_mixture.py": [ + "BayesianGaussianMixture._check_weights_parameters" + ], + "sklearn/mixture/gaussian_mixture.py": [] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture._e_step" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture._e_step" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture", + 
"BaseMixture._print_verbose_msg_init_end" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture", + "BaseMixture._print_verbose_msg_init_end" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture", + "BaseMixture._print_verbose_msg_init_end" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture", + "BaseMixture._print_verbose_msg_init_end" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture.fit" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture.fit" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture.fit", + "BaseMixture.predict" + ], + "sklearn/mixture/gaussian_mixture.py": [ + "GaussianMixture", + "GaussianMixture.__init__" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture.fit", + "BaseMixture.predict" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture", + "BaseMixture.__init__", + "BaseMixture.fit", + "BaseMixture._estimate_weighted_log_prob" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture", + "BaseMixture.__init__", + "BaseMixture.fit", + "BaseMixture._estimate_weighted_log_prob" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture._initialize_parameters" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture._initialize_parameters" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture.fit" + ], + "sklearn/mixture/bayesian_mixture.py": [ + "BayesianGaussianMixture.__init__" + ], + "sklearn/mixture/gaussian_mixture.py": [ + "GaussianMixture.__init__" + ], + "sklearn/mixture/tests/test_bayesian_mixture.py": [], + "sklearn/mixture/tests/test_gaussian_mixture.py": [] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture.fit" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture._e_step" + ] + } + }, + { + "run_name": "20240820_honeycomb", + "spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture._e_step" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.__init__", + "BaseMixture._initialize_parameters" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-14817", + "repo": "sympy/sympy", + "base_commit": "0dbdc0ea83d339936da175f8c3a97d0d6bafb9f8", + "problem_statement": "Error pretty printing 
MatAdd\n```py\r\n>>> pprint(MatrixSymbol('x', n, n) + MatrixSymbol('y*', n, n))\r\nTraceback (most recent call last):\r\n File \"./sympy/core/sympify.py\", line 368, in sympify\r\n expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)\r\n File \"./sympy/parsing/sympy_parser.py\", line 950, in parse_expr\r\n return eval_expr(code, local_dict, global_dict)\r\n File \"./sympy/parsing/sympy_parser.py\", line 863, in eval_expr\r\n code, global_dict, local_dict) # take local objects in preference\r\n File \"\", line 1\r\n Symbol ('y' )*\r\n ^\r\nSyntaxError: unexpected EOF while parsing\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"./sympy/printing/pretty/pretty.py\", line 2371, in pretty_print\r\n use_unicode_sqrt_char=use_unicode_sqrt_char))\r\n File \"./sympy/printing/pretty/pretty.py\", line 2331, in pretty\r\n return pp.doprint(expr)\r\n File \"./sympy/printing/pretty/pretty.py\", line 62, in doprint\r\n return self._print(expr).render(**self._settings)\r\n File \"./sympy/printing/printer.py\", line 274, in _print\r\n return getattr(self, printmethod)(expr, *args, **kwargs)\r\n File \"./sympy/printing/pretty/pretty.py\", line 828, in _print_MatAdd\r\n if S(item.args[0]).is_negative:\r\n File \"./sympy/core/sympify.py\", line 370, in sympify\r\n raise SympifyError('could not parse %r' % a, exc)\r\nsympy.core.sympify.SympifyError: Sympify of expression 'could not parse 'y*'' failed, because of exception being raised:\r\nSyntaxError: unexpected EOF while parsing (, line 1)\r\n```\r\n\r\nThe code shouldn't be using sympify to handle string arguments from MatrixSymbol.\r\n\r\nI don't even understand what the code is doing. Why does it omit the `+` when the first argument is negative? This seems to assume that the arguments of MatAdd have a certain form, and that they will always print a certain way if they are negative. 
\n", + "golden_patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -825,7 +825,8 @@ def _print_MatAdd(self, expr):\n if s is None:\n s = pform # First element\n else:\n- if S(item.args[0]).is_negative:\n+ coeff = item.as_coeff_mmul()[0]\n+ if _coeff_isneg(S(coeff)):\n s = prettyForm(*stringPict.next(s, ' '))\n pform = self._print(item)\n else:\n", + "test_patch": "diff --git a/sympy/printing/pretty/tests/test_pretty.py b/sympy/printing/pretty/tests/test_pretty.py\n--- a/sympy/printing/pretty/tests/test_pretty.py\n+++ b/sympy/printing/pretty/tests/test_pretty.py\n@@ -6094,11 +6094,16 @@ def test_MatrixSymbol_printing():\n A = MatrixSymbol(\"A\", 3, 3)\n B = MatrixSymbol(\"B\", 3, 3)\n C = MatrixSymbol(\"C\", 3, 3)\n-\n assert pretty(-A*B*C) == \"-A*B*C\"\n assert pretty(A - B) == \"-B + A\"\n assert pretty(A*B*C - A*B - B*C) == \"-A*B -B*C + A*B*C\"\n \n+ # issue #14814\n+ x = MatrixSymbol('x', n, n)\n+ y = MatrixSymbol('y*', n, n)\n+ assert pretty(x + y) == \"x + y*\"\n+ assert pretty(-a*x + -2*y*y) == \"-a*x -2*y**y*\"\n+\n \n def test_degree_printing():\n expr1 = 90*degree\n", + "fail_to_pass": "[\"test_MatrixSymbol_printing\"]", + "pass_to_pass": "[\"test_pretty_ascii_str\", \"test_pretty_unicode_str\", \"test_upretty_greek\", \"test_upretty_multiindex\", \"test_upretty_sub_super\", \"test_upretty_subs_missing_in_24\", \"test_upretty_modifiers\", \"test_pretty_Cycle\", \"test_pretty_basic\", \"test_negative_fractions\", \"test_issue_5524\", \"test_pretty_ordering\", \"test_EulerGamma\", \"test_GoldenRatio\", \"test_pretty_relational\", \"test_Assignment\", \"test_AugmentedAssignment\", \"test_issue_7117\", \"test_pretty_rational\", \"test_pretty_functions\", \"test_pretty_sqrt\", \"test_pretty_sqrt_char_knob\", \"test_pretty_sqrt_longsymbol_no_sqrt_char\", \"test_pretty_KroneckerDelta\", \"test_pretty_product\", \"test_pretty_lambda\", \"test_pretty_order\", \"test_pretty_derivatives\", \"test_pretty_integrals\", \"test_pretty_matrix\", \"test_pretty_ndim_arrays\", \"test_tensor_TensorProduct\", \"test_diffgeom_print_WedgeProduct\", \"test_Adjoint\", \"test_pretty_Trace_issue_9044\", \"test_MatrixExpressions\", \"test_pretty_dotproduct\", \"test_pretty_piecewise\", \"test_pretty_ITE\", \"test_pretty_seq\", \"test_any_object_in_sequence\", \"test_print_builtin_set\", \"test_pretty_sets\", \"test_pretty_SetExpr\", \"test_pretty_ImageSet\", \"test_pretty_ConditionSet\", \"test_pretty_ComplexRegion\", \"test_pretty_Union_issue_10414\", \"test_pretty_Intersection_issue_10414\", \"test_ProductSet_paranthesis\", \"test_ProductSet_prod_char_issue_10413\", \"test_pretty_sequences\", \"test_pretty_FourierSeries\", \"test_pretty_FormalPowerSeries\", \"test_pretty_limits\", \"test_pretty_ComplexRootOf\", \"test_pretty_RootSum\", \"test_GroebnerBasis\", \"test_pretty_Boolean\", \"test_pretty_Domain\", \"test_pretty_prec\", \"test_pprint\", \"test_pretty_class\", \"test_pretty_no_wrap_line\", \"test_settings\", \"test_pretty_sum\", \"test_units\", \"test_pretty_Subs\", \"test_gammas\", \"test_beta\", \"test_function_subclass_different_name\", \"test_SingularityFunction\", \"test_deltas\", \"test_hyper\", \"test_meijerg\", \"test_noncommutative\", \"test_pretty_special_functions\", \"test_expint\", \"test_elliptic_functions\", \"test_RandomDomain\", \"test_PrettyPoly\", \"test_issue_6285\", \"test_issue_6359\", \"test_issue_6739\", \"test_complicated_symbol_unchanged\", 
\"test_categories\", \"test_PrettyModules\", \"test_QuotientRing\", \"test_Homomorphism\", \"test_Tr\", \"test_pretty_Add\", \"test_issue_7179\", \"test_issue_7180\", \"test_pretty_Complement\", \"test_pretty_SymmetricDifference\", \"test_pretty_Contains\", \"test_issue_4335\", \"test_issue_6324\", \"test_issue_7927\", \"test_issue_6134\", \"test_issue_9877\", \"test_issue_13651\", \"test_pretty_primenu\", \"test_pretty_primeomega\", \"test_pretty_Mod\", \"test_issue_11801\", \"test_pretty_UnevaluatedExpr\", \"test_issue_10472\", \"test_MatrixElement_printing\", \"test_issue_12675\", \"test_degree_printing\"]", + "expected_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + }, + "test_file_spans": { + "sympy/printing/pretty/tests/test_pretty.py": [ + "test_issue_12675" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + }, + "alternative_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + }, + "alternative_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + }, + "alternative_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + }, + "alternative_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + }, + "alternative_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + }, + "alternative_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + }, + "alternative_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + }, + "alternative_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_MatAdd" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-15011", + "repo": "sympy/sympy", + "base_commit": "b7c5ba2bf3ffd5cf453b25af7c8ddd9a639800cb", + "problem_statement": "lambdify does not work with certain MatrixSymbol names even with dummify=True\n`lambdify` is happy with curly braces in a symbol name and with `MatrixSymbol`s, but not with both at the same time, even if `dummify` is `True`.\r\n\r\nHere is some basic code that gives the error.\r\n```\r\nimport sympy as sy\r\ncurlyx = sy.symbols(\"{x}\")\r\nv = sy.MatrixSymbol(\"v\", 2, 1)\r\ncurlyv = sy.MatrixSymbol(\"{v}\", 2, 1)\r\n```\r\n\r\nThe following two lines of code work:\r\n```\r\ncurlyScalarId = 
sy.lambdify(curlyx, curlyx)\r\nvectorId = sy.lambdify(v,v)\r\n```\r\n\r\nThe following two lines of code give a `SyntaxError`:\r\n```\r\ncurlyVectorId = sy.lambdify(curlyv, curlyv)\r\ncurlyVectorIdDummified = sy.lambdify(curlyv, curlyv, dummify=True)\r\n```\r\n\r\n\n", + "golden_patch": "diff --git a/sympy/utilities/lambdify.py b/sympy/utilities/lambdify.py\n--- a/sympy/utilities/lambdify.py\n+++ b/sympy/utilities/lambdify.py\n@@ -700,14 +700,13 @@ def _is_safe_ident(cls, ident):\n return isinstance(ident, str) and cls._safe_ident_re.match(ident) \\\n and not (keyword.iskeyword(ident) or ident == 'None')\n \n-\n def _preprocess(self, args, expr):\n \"\"\"Preprocess args, expr to replace arguments that do not map\n to valid Python identifiers.\n \n Returns string form of args, and updated expr.\n \"\"\"\n- from sympy import Dummy, Symbol, Function, flatten\n+ from sympy import Dummy, Symbol, MatrixSymbol, Function, flatten\n from sympy.matrices import DeferredVector\n \n dummify = self._dummify\n@@ -725,7 +724,7 @@ def _preprocess(self, args, expr):\n argstrs.append(nested_argstrs)\n elif isinstance(arg, DeferredVector):\n argstrs.append(str(arg))\n- elif isinstance(arg, Symbol):\n+ elif isinstance(arg, Symbol) or isinstance(arg, MatrixSymbol):\n argrep = self._argrepr(arg)\n \n if dummify or not self._is_safe_ident(argrep):\n@@ -739,7 +738,14 @@ def _preprocess(self, args, expr):\n argstrs.append(self._argrepr(dummy))\n expr = self._subexpr(expr, {arg: dummy})\n else:\n- argstrs.append(str(arg))\n+ argrep = self._argrepr(arg)\n+\n+ if dummify:\n+ dummy = Dummy()\n+ argstrs.append(self._argrepr(dummy))\n+ expr = self._subexpr(expr, {arg: dummy})\n+ else:\n+ argstrs.append(str(arg))\n \n return argstrs, expr\n \n", + "test_patch": "diff --git a/sympy/utilities/tests/test_lambdify.py b/sympy/utilities/tests/test_lambdify.py\n--- a/sympy/utilities/tests/test_lambdify.py\n+++ b/sympy/utilities/tests/test_lambdify.py\n@@ -728,6 +728,14 @@ def test_dummification():\n raises(SyntaxError, lambda: lambdify(2 * F(t), 2 * F(t) + 5))\n raises(SyntaxError, lambda: lambdify(2 * F(t), 4 * F(t) + 5))\n \n+def test_curly_matrix_symbol():\n+ # Issue #15009\n+ curlyv = sympy.MatrixSymbol(\"{v}\", 2, 1)\n+ lam = lambdify(curlyv, curlyv)\n+ assert lam(1)==1\n+ lam = lambdify(curlyv, curlyv, dummify=True)\n+ assert lam(1)==1\n+\n def test_python_keywords():\n # Test for issue 7452. 
The automatic dummification should ensure use of\n # Python reserved keywords as symbol names will create valid lambda\n", + "fail_to_pass": "[\"test_curly_matrix_symbol\"]", + "pass_to_pass": "[\"test_no_args\", \"test_single_arg\", \"test_list_args\", \"test_nested_args\", \"test_str_args\", \"test_own_namespace_1\", \"test_own_namespace_2\", \"test_own_module\", \"test_bad_args\", \"test_atoms\", \"test_sympy_lambda\", \"test_math_lambda\", \"test_mpmath_lambda\", \"test_number_precision\", \"test_mpmath_precision\", \"test_math_transl\", \"test_mpmath_transl\", \"test_exponentiation\", \"test_sqrt\", \"test_trig\", \"test_vector_simple\", \"test_vector_discontinuous\", \"test_trig_symbolic\", \"test_trig_float\", \"test_docs\", \"test_math\", \"test_sin\", \"test_matrix\", \"test_issue9474\", \"test_integral\", \"test_sym_single_arg\", \"test_sym_list_args\", \"test_sym_integral\", \"test_namespace_order\", \"test_namespace_type\", \"test_imps\", \"test_imps_errors\", \"test_imps_wrong_args\", \"test_lambdify_imps\", \"test_dummification\", \"test_python_keywords\", \"test_lambdify_docstring\", \"test_special_printers\", \"test_true_false\", \"test_issue_2790\", \"test_issue_12092\", \"test_ITE\", \"test_Min_Max\", \"test_issue_12173\", \"test_sinc_mpmath\", \"test_lambdify_dummy_arg\", \"test_lambdify_mixed_symbol_dummy_args\", \"test_lambdify_inspect\"]", + "expected_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "test_file_spans": { + "sympy/utilities/tests/test_lambdify.py": [ + "test_python_keywords" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess", + "_EvaluatorPrinter" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess", + "_EvaluatorPrinter" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ], + "sympy/utilities/tests/test_lambdify.py": [] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + 
"_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess", + "_EvaluatorPrinter._subexpr" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess", + "_EvaluatorPrinter._subexpr" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/matrices/expressions/matexpr.py": [ + "MatrixSymbol.name" + ], + "sympy/utilities/lambdify.py": [ + "lambdify" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "lambdify" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "lambdify", + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "lambdify", + "_EvaluatorPrinter._preprocess" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + }, + "alternative_spans": { + "sympy/utilities/lambdify.py": [ + "_EvaluatorPrinter._preprocess" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "sympy/utilities/lambdify.py": [ + "lambdify" + ] + } + } + ] + }, + { + "instance_id": "astropy__astropy-7746", + "repo": "astropy/astropy", + "base_commit": "d5bd3f68bb6d5ce3a61bdce9883ee750d1afade5", + "problem_statement": "Issue when passing empty lists/arrays to WCS transformations\nThe following should not fail but instead should return empty lists/arrays:\r\n\r\n```\r\nIn [1]: from astropy.wcs import WCS\r\n\r\nIn [2]: wcs = WCS('2MASS_h.fits')\r\n\r\nIn [3]: wcs.wcs_pix2world([], [], 0)\r\n---------------------------------------------------------------------------\r\nInconsistentAxisTypesError Traceback (most recent call last)\r\n in ()\r\n----> 1 wcs.wcs_pix2world([], [], 0)\r\n\r\n~/Dropbox/Code/Astropy/astropy/astropy/wcs/wcs.py in wcs_pix2world(self, *args, **kwargs)\r\n 1352 return self._array_converter(\r\n 1353 lambda xy, o: self.wcs.p2s(xy, o)['world'],\r\n-> 1354 'output', *args, **kwargs)\r\n 1355 wcs_pix2world.__doc__ = \"\"\"\r\n 1356 Transforms pixel coordinates to world coordinates by doing\r\n\r\n~/Dropbox/Code/Astropy/astropy/astropy/wcs/wcs.py in _array_converter(self, func, sky, ra_dec_order, *args)\r\n 1267 \"a 1-D array for each axis, followed by 
an origin.\")\r\n 1268 \r\n-> 1269 return _return_list_of_arrays(axes, origin)\r\n 1270 \r\n 1271 raise TypeError(\r\n\r\n~/Dropbox/Code/Astropy/astropy/astropy/wcs/wcs.py in _return_list_of_arrays(axes, origin)\r\n 1223 if ra_dec_order and sky == 'input':\r\n 1224 xy = self._denormalize_sky(xy)\r\n-> 1225 output = func(xy, origin)\r\n 1226 if ra_dec_order and sky == 'output':\r\n 1227 output = self._normalize_sky(output)\r\n\r\n~/Dropbox/Code/Astropy/astropy/astropy/wcs/wcs.py in (xy, o)\r\n 1351 raise ValueError(\"No basic WCS settings were created.\")\r\n 1352 return self._array_converter(\r\n-> 1353 lambda xy, o: self.wcs.p2s(xy, o)['world'],\r\n 1354 'output', *args, **kwargs)\r\n 1355 wcs_pix2world.__doc__ = \"\"\"\r\n\r\nInconsistentAxisTypesError: ERROR 4 in wcsp2s() at line 2646 of file cextern/wcslib/C/wcs.c:\r\nncoord and/or nelem inconsistent with the wcsprm.\r\n```\n", + "golden_patch": "diff --git a/astropy/wcs/wcs.py b/astropy/wcs/wcs.py\n--- a/astropy/wcs/wcs.py\n+++ b/astropy/wcs/wcs.py\n@@ -1212,6 +1212,9 @@ def _array_converter(self, func, sky, *args, ra_dec_order=False):\n \"\"\"\n \n def _return_list_of_arrays(axes, origin):\n+ if any([x.size == 0 for x in axes]):\n+ return axes\n+\n try:\n axes = np.broadcast_arrays(*axes)\n except ValueError:\n@@ -1235,6 +1238,8 @@ def _return_single_array(xy, origin):\n raise ValueError(\n \"When providing two arguments, the array must be \"\n \"of shape (N, {0})\".format(self.naxis))\n+ if 0 in xy.shape:\n+ return xy\n if ra_dec_order and sky == 'input':\n xy = self._denormalize_sky(xy)\n result = func(xy, origin)\n", + "test_patch": "diff --git a/astropy/wcs/tests/test_wcs.py b/astropy/wcs/tests/test_wcs.py\n--- a/astropy/wcs/tests/test_wcs.py\n+++ b/astropy/wcs/tests/test_wcs.py\n@@ -1093,3 +1093,21 @@ def test_keyedsip():\n assert isinstance( w.sip, wcs.Sip )\n assert w.sip.crpix[0] == 2048\n assert w.sip.crpix[1] == 1026\n+\n+\n+def test_zero_size_input():\n+ with fits.open(get_pkg_data_filename('data/sip.fits')) as f:\n+ w = wcs.WCS(f[0].header)\n+\n+ inp = np.zeros((0, 2))\n+ assert_array_equal(inp, w.all_pix2world(inp, 0))\n+ assert_array_equal(inp, w.all_world2pix(inp, 0))\n+\n+ inp = [], [1]\n+ result = w.all_pix2world([], [1], 0)\n+ assert_array_equal(inp[0], result[0])\n+ assert_array_equal(inp[1], result[1])\n+\n+ result = w.all_world2pix([], [1], 0)\n+ assert_array_equal(inp[0], result[0])\n+ assert_array_equal(inp[1], result[1])\n", + "fail_to_pass": "[\"astropy/wcs/tests/test_wcs.py::test_zero_size_input\"]", + "pass_to_pass": "[\"astropy/wcs/tests/test_wcs.py::TestMaps::test_consistency\", \"astropy/wcs/tests/test_wcs.py::TestMaps::test_maps\", \"astropy/wcs/tests/test_wcs.py::TestSpectra::test_consistency\", \"astropy/wcs/tests/test_wcs.py::TestSpectra::test_spectra\", \"astropy/wcs/tests/test_wcs.py::test_fixes\", \"astropy/wcs/tests/test_wcs.py::test_outside_sky\", \"astropy/wcs/tests/test_wcs.py::test_pix2world\", \"astropy/wcs/tests/test_wcs.py::test_load_fits_path\", \"astropy/wcs/tests/test_wcs.py::test_dict_init\", \"astropy/wcs/tests/test_wcs.py::test_extra_kwarg\", \"astropy/wcs/tests/test_wcs.py::test_3d_shapes\", \"astropy/wcs/tests/test_wcs.py::test_preserve_shape\", \"astropy/wcs/tests/test_wcs.py::test_broadcasting\", \"astropy/wcs/tests/test_wcs.py::test_shape_mismatch\", \"astropy/wcs/tests/test_wcs.py::test_invalid_shape\", \"astropy/wcs/tests/test_wcs.py::test_warning_about_defunct_keywords\", \"astropy/wcs/tests/test_wcs.py::test_warning_about_defunct_keywords_exception\", 
\"astropy/wcs/tests/test_wcs.py::test_to_header_string\", \"astropy/wcs/tests/test_wcs.py::test_to_fits\", \"astropy/wcs/tests/test_wcs.py::test_to_header_warning\", \"astropy/wcs/tests/test_wcs.py::test_no_comments_in_header\", \"astropy/wcs/tests/test_wcs.py::test_find_all_wcs_crash\", \"astropy/wcs/tests/test_wcs.py::test_validate\", \"astropy/wcs/tests/test_wcs.py::test_validate_with_2_wcses\", \"astropy/wcs/tests/test_wcs.py::test_crpix_maps_to_crval\", \"astropy/wcs/tests/test_wcs.py::test_all_world2pix\", \"astropy/wcs/tests/test_wcs.py::test_scamp_sip_distortion_parameters\", \"astropy/wcs/tests/test_wcs.py::test_fixes2\", \"astropy/wcs/tests/test_wcs.py::test_unit_normalization\", \"astropy/wcs/tests/test_wcs.py::test_footprint_to_file\", \"astropy/wcs/tests/test_wcs.py::test_validate_faulty_wcs\", \"astropy/wcs/tests/test_wcs.py::test_error_message\", \"astropy/wcs/tests/test_wcs.py::test_out_of_bounds\", \"astropy/wcs/tests/test_wcs.py::test_calc_footprint_1\", \"astropy/wcs/tests/test_wcs.py::test_calc_footprint_2\", \"astropy/wcs/tests/test_wcs.py::test_calc_footprint_3\", \"astropy/wcs/tests/test_wcs.py::test_sip\", \"astropy/wcs/tests/test_wcs.py::test_printwcs\", \"astropy/wcs/tests/test_wcs.py::test_invalid_spherical\", \"astropy/wcs/tests/test_wcs.py::test_no_iteration\", \"astropy/wcs/tests/test_wcs.py::test_sip_tpv_agreement\", \"astropy/wcs/tests/test_wcs.py::test_tpv_copy\", \"astropy/wcs/tests/test_wcs.py::test_hst_wcs\", \"astropy/wcs/tests/test_wcs.py::test_list_naxis\", \"astropy/wcs/tests/test_wcs.py::test_sip_broken\", \"astropy/wcs/tests/test_wcs.py::test_no_truncate_crval\", \"astropy/wcs/tests/test_wcs.py::test_no_truncate_crval_try2\", \"astropy/wcs/tests/test_wcs.py::test_no_truncate_crval_p17\", \"astropy/wcs/tests/test_wcs.py::test_no_truncate_using_compare\", \"astropy/wcs/tests/test_wcs.py::test_passing_ImageHDU\", \"astropy/wcs/tests/test_wcs.py::test_inconsistent_sip\", \"astropy/wcs/tests/test_wcs.py::test_bounds_check\", \"astropy/wcs/tests/test_wcs.py::test_naxis\", \"astropy/wcs/tests/test_wcs.py::test_sip_with_altkey\", \"astropy/wcs/tests/test_wcs.py::test_to_fits_1\", \"astropy/wcs/tests/test_wcs.py::test_keyedsip\"]", + "expected_spans": { + "astropy/wcs/wcs.py": [ + "WCS._array_converter" + ] + }, + "test_file_spans": { + "astropy/wcs/tests/test_wcs.py": [] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-15308", + "repo": "sympy/sympy", + "base_commit": "fb59d703e6863ed803c98177b59197b5513332e9", + "problem_statement": "LaTeX printing for Matrix Expression\n```py\r\n>>> A = MatrixSymbol(\"A\", n, n)\r\n>>> latex(trace(A**2))\r\n'Trace(A**2)'\r\n```\r\n\r\nThe bad part is not only is Trace not recognized, but whatever printer is being used doesn't fallback to the LaTeX printer for the inner expression (it should be `A^2`). 
\n", + "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -289,6 +289,10 @@ def _do_exponent(self, expr, exp):\n else:\n return expr\n \n+ def _print_Basic(self, expr):\n+ l = [self._print(o) for o in expr.args]\n+ return self._deal_with_super_sub(expr.__class__.__name__) + r\"\\left(%s\\right)\" % \", \".join(l)\n+\n def _print_bool(self, e):\n return r\"\\mathrm{%s}\" % e\n \n@@ -1462,6 +1466,10 @@ def _print_Transpose(self, expr):\n else:\n return \"%s^T\" % self._print(mat)\n \n+ def _print_Trace(self, expr):\n+ mat = expr.arg\n+ return r\"\\mathrm{tr}\\left (%s \\right )\" % self._print(mat)\n+\n def _print_Adjoint(self, expr):\n mat = expr.arg\n from sympy.matrices import MatrixSymbol\n", + "test_patch": "diff --git a/sympy/printing/tests/test_latex.py b/sympy/printing/tests/test_latex.py\n--- a/sympy/printing/tests/test_latex.py\n+++ b/sympy/printing/tests/test_latex.py\n@@ -1866,3 +1866,35 @@ def test_latex_printer_tensor():\n \n expr = TensorElement(K(i,j,-k,-l), {i:3})\n assert latex(expr) == 'K{}^{i=3,j}{}_{kl}'\n+\n+\n+def test_trace():\n+ # Issue 15303\n+ from sympy import trace\n+ A = MatrixSymbol(\"A\", 2, 2)\n+ assert latex(trace(A)) == r\"\\mathrm{tr}\\left (A \\right )\"\n+ assert latex(trace(A**2)) == r\"\\mathrm{tr}\\left (A^{2} \\right )\"\n+\n+\n+def test_print_basic():\n+ # Issue 15303\n+ from sympy import Basic, Expr\n+\n+ # dummy class for testing printing where the function is not implemented in latex.py\n+ class UnimplementedExpr(Expr):\n+ def __new__(cls, e):\n+ return Basic.__new__(cls, e)\n+\n+ # dummy function for testing\n+ def unimplemented_expr(expr):\n+ return UnimplementedExpr(expr).doit()\n+\n+ # override class name to use superscript / subscript\n+ def unimplemented_expr_sup_sub(expr):\n+ result = UnimplementedExpr(expr)\n+ result.__class__.__name__ = 'UnimplementedExpr_x^1'\n+ return result\n+\n+ assert latex(unimplemented_expr(x)) == r'UnimplementedExpr\\left(x\\right)'\n+ assert latex(unimplemented_expr(x**2)) == r'UnimplementedExpr\\left(x^{2}\\right)'\n+ assert latex(unimplemented_expr_sup_sub(x)) == r'UnimplementedExpr^{1}_{x}\\left(x\\right)'\n", + "fail_to_pass": "[\"test_trace\"]", + "pass_to_pass": "[\"test_printmethod\", \"test_latex_basic\", \"test_latex_builtins\", \"test_latex_SingularityFunction\", \"test_latex_cycle\", \"test_latex_permutation\", \"test_latex_Float\", \"test_latex_vector_expressions\", \"test_latex_symbols\", \"test_latex_functions\", \"test_function_subclass_different_name\", \"test_hyper_printing\", \"test_latex_bessel\", \"test_latex_fresnel\", \"test_latex_brackets\", \"test_latex_indexed\", \"test_latex_derivatives\", \"test_latex_subs\", \"test_latex_integrals\", \"test_latex_sets\", \"test_latex_SetExpr\", \"test_latex_Range\", \"test_latex_sequences\", \"test_latex_FourierSeries\", \"test_latex_FormalPowerSeries\", \"test_latex_intervals\", \"test_latex_AccumuBounds\", \"test_latex_emptyset\", \"test_latex_commutator\", \"test_latex_union\", \"test_latex_symmetric_difference\", \"test_latex_Complement\", \"test_latex_Complexes\", \"test_latex_productset\", \"test_latex_Naturals\", \"test_latex_Naturals0\", \"test_latex_Integers\", \"test_latex_ImageSet\", \"test_latex_ConditionSet\", \"test_latex_ComplexRegion\", \"test_latex_Contains\", \"test_latex_sum\", \"test_latex_product\", \"test_latex_limits\", \"test_latex_log\", \"test_issue_3568\", \"test_latex\", \"test_latex_dict\", \"test_latex_list\", 
\"test_latex_rational\", \"test_latex_inverse\", \"test_latex_DiracDelta\", \"test_latex_Heaviside\", \"test_latex_KroneckerDelta\", \"test_latex_LeviCivita\", \"test_mode\", \"test_latex_Piecewise\", \"test_latex_Matrix\", \"test_latex_matrix_with_functions\", \"test_latex_NDimArray\", \"test_latex_mul_symbol\", \"test_latex_issue_4381\", \"test_latex_issue_4576\", \"test_latex_pow_fraction\", \"test_noncommutative\", \"test_latex_order\", \"test_latex_Lambda\", \"test_latex_PolyElement\", \"test_latex_FracElement\", \"test_latex_Poly\", \"test_latex_Poly_order\", \"test_latex_ComplexRootOf\", \"test_latex_RootSum\", \"test_settings\", \"test_latex_numbers\", \"test_latex_euler\", \"test_lamda\", \"test_custom_symbol_names\", \"test_matAdd\", \"test_matMul\", \"test_latex_MatrixSlice\", \"test_latex_RandomDomain\", \"test_PrettyPoly\", \"test_integral_transforms\", \"test_PolynomialRingBase\", \"test_categories\", \"test_Modules\", \"test_QuotientRing\", \"test_Tr\", \"test_Adjoint\", \"test_Hadamard\", \"test_ZeroMatrix\", \"test_boolean_args_order\", \"test_imaginary\", \"test_builtins_without_args\", \"test_latex_greek_functions\", \"test_translate\", \"test_other_symbols\", \"test_modifiers\", \"test_greek_symbols\", \"test_builtin_no_args\", \"test_issue_6853\", \"test_Mul\", \"test_Pow\", \"test_issue_7180\", \"test_issue_8409\", \"test_issue_7117\", \"test_issue_2934\", \"test_issue_10489\", \"test_issue_12886\", \"test_issue_13651\", \"test_latex_UnevaluatedExpr\", \"test_MatrixElement_printing\", \"test_MatrixSymbol_printing\", \"test_Quaternion_latex_printing\", \"test_TensorProduct_printing\", \"test_WedgeProduct_printing\", \"test_issue_14041\", \"test_issue_9216\", \"test_latex_printer_tensor\"]", + "expected_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_bool", + "LatexPrinter._print_Adjoint" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_latex.py": [] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-15345", + "repo": "sympy/sympy", + "base_commit": "9ef28fba5b4d6d0168237c9c005a550e6dc27d81", + "problem_statement": "mathematica_code gives wrong output with Max\nIf I run the code\r\n\r\n```\r\nx = symbols('x')\r\nmathematica_code(Max(x,2))\r\n```\r\n\r\nthen I would expect the output `'Max[x,2]'` which is valid Mathematica code but instead I get `'Max(2, x)'` which is not valid Mathematica code.\n", + "golden_patch": "diff --git a/sympy/printing/mathematica.py b/sympy/printing/mathematica.py\n--- a/sympy/printing/mathematica.py\n+++ b/sympy/printing/mathematica.py\n@@ -31,7 +31,8 @@\n \"asech\": [(lambda x: True, \"ArcSech\")],\n \"acsch\": [(lambda x: True, \"ArcCsch\")],\n \"conjugate\": [(lambda x: True, \"Conjugate\")],\n-\n+ \"Max\": [(lambda *x: True, \"Max\")],\n+ \"Min\": [(lambda *x: True, \"Min\")],\n }\n \n \n@@ -101,6 +102,8 @@ def _print_Function(self, expr):\n return \"%s[%s]\" % (mfunc, self.stringify(expr.args, \", \"))\n return expr.func.__name__ + \"[%s]\" % self.stringify(expr.args, \", \")\n \n+ _print_MinMaxBase = _print_Function\n+\n def _print_Integral(self, expr):\n if len(expr.variables) == 1 and not expr.limits[0][1:]:\n args = [expr.args[0], expr.variables[0]]\n", + "test_patch": "diff --git a/sympy/printing/tests/test_mathematica.py b/sympy/printing/tests/test_mathematica.py\n--- a/sympy/printing/tests/test_mathematica.py\n+++ b/sympy/printing/tests/test_mathematica.py\n@@ -2,7 +2,7 @@\n Rational, Integer, Tuple, Derivative)\n from sympy.integrals import Integral\n from 
sympy.concrete import Sum\n-from sympy.functions import exp, sin, cos, conjugate\n+from sympy.functions import exp, sin, cos, conjugate, Max, Min\n \n from sympy import mathematica_code as mcode\n \n@@ -28,6 +28,7 @@ def test_Function():\n assert mcode(f(x, y, z)) == \"f[x, y, z]\"\n assert mcode(sin(x) ** cos(x)) == \"Sin[x]^Cos[x]\"\n assert mcode(conjugate(x)) == \"Conjugate[x]\"\n+ assert mcode(Max(x,y,z)*Min(y,z)) == \"Max[x, y, z]*Min[y, z]\"\n \n \n def test_Pow():\n", + "fail_to_pass": "[\"test_Function\"]", + "pass_to_pass": "[\"test_Integer\", \"test_Rational\", \"test_Pow\", \"test_Mul\", \"test_constants\", \"test_containers\", \"test_Integral\", \"test_Derivative\"]", + "expected_spans": { + "sympy/printing/mathematica.py": [ + "imports", + "MCodePrinter._print_Integral" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_mathematica.py": [ + "imports", + "test_Function" + ] + }, + "resolved_by": [ + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/printing/mathematica.py": [ + "imports", + "MCodePrinter._print_Integral" + ] + }, + "alternative_spans": { + "sympy/printing/mathematica.py": [ + "imports", + "MCodePrinter._print_Integral" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sympy/printing/mathematica.py": [ + "MCodePrinter:13", + "MCodePrinter._print_Function" + ] + }, + "alternative_spans": { + "sympy/printing/mathematica.py": [ + "MCodePrinter:13", + "MCodePrinter._print_Function" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "sympy/printing/mathematica.py": [ + "MCodePrinter:13", + "MCodePrinter._print_Function" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-15346", + "repo": "sympy/sympy", + "base_commit": "9ef28fba5b4d6d0168237c9c005a550e6dc27d81", + "problem_statement": "can't simplify sin/cos with Rational?\nlatest cloned sympy, python 3 on windows\r\nfirstly, cos, sin with symbols can be simplified; rational number can be simplified\r\n```python\r\nfrom sympy import *\r\n\r\nx, y = symbols('x, y', real=True)\r\nr = sin(x)*sin(y) + cos(x)*cos(y)\r\nprint(r)\r\nprint(r.simplify())\r\nprint()\r\n\r\nr = Rational(1, 50) - Rational(1, 25)\r\nprint(r)\r\nprint(r.simplify())\r\nprint()\r\n```\r\nsays\r\n```cmd\r\nsin(x)*sin(y) + cos(x)*cos(y)\r\ncos(x - y)\r\n\r\n-1/50\r\n-1/50\r\n```\r\n\r\nbut\r\n```python\r\nt1 = Matrix([sin(Rational(1, 50)), cos(Rational(1, 50)), 0])\r\nt2 = Matrix([sin(Rational(1, 25)), cos(Rational(1, 25)), 0])\r\nr = t1.dot(t2)\r\nprint(r)\r\nprint(r.simplify())\r\nprint()\r\n\r\nr = sin(Rational(1, 50))*sin(Rational(1, 25)) + cos(Rational(1, 50))*cos(Rational(1, 25))\r\nprint(r)\r\nprint(r.simplify())\r\nprint()\r\n\r\nprint(acos(r))\r\nprint(acos(r).simplify())\r\nprint()\r\n```\r\nsays\r\n```cmd\r\nsin(1/50)*sin(1/25) + cos(1/50)*cos(1/25)\r\nsin(1/50)*sin(1/25) + cos(1/50)*cos(1/25)\r\n\r\nsin(1/50)*sin(1/25) + cos(1/50)*cos(1/25)\r\nsin(1/50)*sin(1/25) + cos(1/50)*cos(1/25)\r\n\r\nacos(sin(1/50)*sin(1/25) + cos(1/50)*cos(1/25))\r\nacos(sin(1/50)*sin(1/25) + cos(1/50)*cos(1/25))\r\n```\r\n\r\n\n", + "golden_patch": "diff --git a/sympy/simplify/trigsimp.py b/sympy/simplify/trigsimp.py\n--- a/sympy/simplify/trigsimp.py\n+++ b/sympy/simplify/trigsimp.py\n@@ -1143,8 +1143,8 @@ def _futrig(e, **kwargs):\n lambda x: _eapply(factor, x, trigs),\n TR14, # factored powers of identities\n [identity, lambda x: _eapply(_mexpand, x, trigs)],\n- TRmorrie,\n TR10i, 
# sin-cos products > sin-cos of sums\n+ TRmorrie,\n [identity, TR8], # sin-cos products -> sin-cos of sums\n [identity, lambda x: TR2i(TR2(x))], # tan -> sin-cos -> tan\n [\n", + "test_patch": "diff --git a/sympy/simplify/tests/test_trigsimp.py b/sympy/simplify/tests/test_trigsimp.py\n--- a/sympy/simplify/tests/test_trigsimp.py\n+++ b/sympy/simplify/tests/test_trigsimp.py\n@@ -1,7 +1,8 @@\n from sympy import (\n symbols, sin, simplify, cos, trigsimp, rad, tan, exptrigsimp,sinh,\n cosh, diff, cot, Subs, exp, tanh, exp, S, integrate, I,Matrix,\n- Symbol, coth, pi, log, count_ops, sqrt, E, expand, Piecewise)\n+ Symbol, coth, pi, log, count_ops, sqrt, E, expand, Piecewise , Rational\n+ )\n \n from sympy.core.compatibility import long\n from sympy.utilities.pytest import XFAIL\n@@ -357,6 +358,14 @@ def test_issue_2827_trigsimp_methods():\n eq = 1/sqrt(E) + E\n assert exptrigsimp(eq) == eq\n \n+def test_issue_15129_trigsimp_methods():\n+ t1 = Matrix([sin(Rational(1, 50)), cos(Rational(1, 50)), 0])\n+ t2 = Matrix([sin(Rational(1, 25)), cos(Rational(1, 25)), 0])\n+ t3 = Matrix([cos(Rational(1, 25)), sin(Rational(1, 25)), 0])\n+ r1 = t1.dot(t2)\n+ r2 = t1.dot(t3)\n+ assert trigsimp(r1) == cos(S(1)/50)\n+ assert trigsimp(r2) == sin(S(3)/50)\n \n def test_exptrigsimp():\n def valid(a, b):\n", + "fail_to_pass": "[\"test_issue_15129_trigsimp_methods\"]", + "pass_to_pass": "[\"test_trigsimp1\", \"test_trigsimp1a\", \"test_trigsimp2\", \"test_issue_4373\", \"test_trigsimp3\", \"test_issue_4661\", \"test_issue_4494\", \"test_issue_5948\", \"test_issue_4775\", \"test_issue_4280\", \"test_issue_3210\", \"test_trigsimp_issues\", \"test_trigsimp_issue_2515\", \"test_trigsimp_issue_3826\", \"test_trigsimp_issue_4032\", \"test_trigsimp_issue_7761\", \"test_trigsimp_noncommutative\", \"test_hyperbolic_simp\", \"test_trigsimp_groebner\", \"test_issue_2827_trigsimp_methods\", \"test_exptrigsimp\", \"test_powsimp_on_numbers\"]", + "expected_spans": { + "sympy/simplify/trigsimp.py": [ + "_futrig" + ] + }, + "test_file_spans": { + "sympy/simplify/tests/test_trigsimp.py": [ + "imports", + "test_issue_2827_trigsimp_methods" + ] + }, + "resolved_by": [ + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/simplify/trigsimp.py": [ + "imports", + "trigsimp" + ] + }, + "alternative_spans": { + "sympy/simplify/trigsimp.py": [ + "imports", + "trigsimp" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "sympy/simplify/trigsimp.py": [ + "imports", + "trigsimp" + ] + } + } + ] + }, + { + "instance_id": "scikit-learn__scikit-learn-12471", + "repo": "scikit-learn/scikit-learn", + "base_commit": "02dc9ed680e7f53f1b0d410dcdd37341c7958eb1", + "problem_statement": "OneHotEncoder ignore unknown error when categories are strings \n#### Description\r\n\r\nThis bug is very specific, but it happens when you set OneHotEncoder to ignore unknown entries.\r\nand your labels are strings. The memory of the arrays is not handled safely and it can lead to a ValueError\r\n\r\nBasically, when you call the transform method it will sets all the unknown strings on your array to OneHotEncoder.categories_[i][0] which is the first category alphabetically sorted given for fit\r\nIf this OneHotEncoder.categories_[i][0] is a long string, and the array that you want to transform has small strings, then it is impossible to fit the whole OneHotEncoder.categories_[i][0] into the entries of the array we want to transform. 
So OneHotEncoder.categories_[i][0] is truncated and this raise the ValueError.\r\n\r\n\r\n\r\n#### Steps/Code to Reproduce\r\n```\r\n\r\nimport numpy as np\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n\r\n\r\n# It needs to be numpy arrays, the error does not appear \r\n# is you have lists of lists because it gets treated like an array of objects.\r\ntrain = np.array([ '22','333','4444','11111111' ]).reshape((-1,1))\r\ntest = np.array([ '55555', '22' ]).reshape((-1,1))\r\n\r\nohe = OneHotEncoder(dtype=bool,handle_unknown='ignore')\r\n\r\nohe.fit( train )\r\nenc_test = ohe.transform( test )\r\n\r\n```\r\n\r\n\r\n#### Expected Results\r\nHere we should get an sparse matrix 2x4 false everywhere except at (1,1) the '22' that is known\r\n\r\n#### Actual Results\r\n\r\n> ValueError: y contains previously unseen labels: ['111111']\r\n\r\n\r\n#### Versions\r\nSystem:\r\n python: 2.7.12 (default, Dec 4 2017, 14:50:18) [GCC 5.4.0 20160609]\r\n machine: Linux-4.4.0-138-generic-x86_64-with-Ubuntu-16.04-xenial\r\nexecutable: /usr/bin/python\r\n\r\nBLAS:\r\n macros: HAVE_CBLAS=None\r\ncblas_libs: openblas, openblas\r\n lib_dirs: /usr/lib\r\n\r\nPython deps:\r\n Cython: 0.25.2\r\n scipy: 0.18.1\r\nsetuptools: 36.7.0\r\n pip: 9.0.1\r\n numpy: 1.15.2\r\n pandas: 0.19.1\r\n sklearn: 0.21.dev0\r\n\r\n\r\n\r\n#### Comments\r\n\r\nI already implemented a fix for this issue, where I check the size of the elements in the array before, and I cast them into objects if necessary.\n", + "golden_patch": "diff --git a/sklearn/preprocessing/_encoders.py b/sklearn/preprocessing/_encoders.py\n--- a/sklearn/preprocessing/_encoders.py\n+++ b/sklearn/preprocessing/_encoders.py\n@@ -110,7 +110,14 @@ def _transform(self, X, handle_unknown='error'):\n # continue `The rows are marked `X_mask` and will be\n # removed later.\n X_mask[:, i] = valid_mask\n- Xi = Xi.copy()\n+ # cast Xi into the largest string type necessary\n+ # to handle different lengths of numpy strings\n+ if (self.categories_[i].dtype.kind in ('U', 'S')\n+ and self.categories_[i].itemsize > Xi.itemsize):\n+ Xi = Xi.astype(self.categories_[i].dtype)\n+ else:\n+ Xi = Xi.copy()\n+\n Xi[~valid_mask] = self.categories_[i][0]\n _, encoded = _encode(Xi, self.categories_[i], encode=True)\n X_int[:, i] = encoded\n", + "test_patch": "diff --git a/sklearn/preprocessing/tests/test_encoders.py b/sklearn/preprocessing/tests/test_encoders.py\n--- a/sklearn/preprocessing/tests/test_encoders.py\n+++ b/sklearn/preprocessing/tests/test_encoders.py\n@@ -273,6 +273,23 @@ def test_one_hot_encoder_no_categorical_features():\n assert enc.categories_ == []\n \n \n+def test_one_hot_encoder_handle_unknown_strings():\n+ X = np.array(['11111111', '22', '333', '4444']).reshape((-1, 1))\n+ X2 = np.array(['55555', '22']).reshape((-1, 1))\n+ # Non Regression test for the issue #12470\n+ # Test the ignore option, when categories are numpy string dtype\n+ # particularly when the known category strings are larger\n+ # than the unknown category strings\n+ oh = OneHotEncoder(handle_unknown='ignore')\n+ oh.fit(X)\n+ X2_passed = X2.copy()\n+ assert_array_equal(\n+ oh.transform(X2_passed).toarray(),\n+ np.array([[0., 0., 0., 0.], [0., 1., 0., 0.]]))\n+ # ensure transformed data was not modified in place\n+ assert_array_equal(X2, X2_passed)\n+\n+\n @pytest.mark.parametrize(\"output_dtype\", [np.int32, np.float32, np.float64])\n @pytest.mark.parametrize(\"input_dtype\", [np.int32, np.float32, np.float64])\n def test_one_hot_encoder_dtype(input_dtype, output_dtype):\n", + "fail_to_pass": 
"[\"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_handle_unknown_strings\"]", + "pass_to_pass": "[\"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_sparse\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dense\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_deprecationwarnings\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_force_new_behaviour\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_categorical_features\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_handle_unknown\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_not_fitted\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_no_categorical_features\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype[int32-int32]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype[int32-float32]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype[int32-float64]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype[float32-int32]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype[float32-float32]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype[float32-float64]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype[float64-int32]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype[float64-float32]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype[float64-float64]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype_pandas[int32]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype_pandas[float32]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_dtype_pandas[float64]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_set_params\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder[mixed]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder[numeric]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder[object]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_inverse\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_categories[mixed]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_categories[numeric]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_categories[object]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_categories[string]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_specified_categories[object]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_specified_categories[numeric]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_specified_categories[object-string-cat]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_unsorted_categories\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_specified_categories_mixed_columns\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_pandas\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_feature_names\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_feature_names_unicode\", 
\"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_raise_missing[error-numeric]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_raise_missing[error-object]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_raise_missing[ignore-numeric]\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_raise_missing[ignore-object]\", \"sklearn/preprocessing/tests/test_encoders.py::test_ordinal_encoder[mixed]\", \"sklearn/preprocessing/tests/test_encoders.py::test_ordinal_encoder[numeric]\", \"sklearn/preprocessing/tests/test_encoders.py::test_ordinal_encoder[object]\", \"sklearn/preprocessing/tests/test_encoders.py::test_ordinal_encoder_specified_categories[object]\", \"sklearn/preprocessing/tests/test_encoders.py::test_ordinal_encoder_specified_categories[numeric]\", \"sklearn/preprocessing/tests/test_encoders.py::test_ordinal_encoder_specified_categories[object-string-cat]\", \"sklearn/preprocessing/tests/test_encoders.py::test_ordinal_encoder_inverse\", \"sklearn/preprocessing/tests/test_encoders.py::test_ordinal_encoder_raise_missing[numeric]\", \"sklearn/preprocessing/tests/test_encoders.py::test_ordinal_encoder_raise_missing[object]\", \"sklearn/preprocessing/tests/test_encoders.py::test_encoder_dtypes\", \"sklearn/preprocessing/tests/test_encoders.py::test_encoder_dtypes_pandas\", \"sklearn/preprocessing/tests/test_encoders.py::test_one_hot_encoder_warning\"]", + "expected_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "test_file_spans": { + "sklearn/preprocessing/tests/test_encoders.py": [ + "test_one_hot_encoder_dtype" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform", + "OneHotEncoder.fit_transform", + "OneHotEncoder._legacy_transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform", + "OneHotEncoder.fit_transform", + "OneHotEncoder._legacy_transform" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "docstring", + "imports", + "_BaseEncoder._check_X", + "_BaseEncoder._fit", + "_BaseEncoder._transform", + "OneHotEncoder.__init__", + "OneHotEncoder.active_features_", + "OneHotEncoder.feature_indices_", + "OneHotEncoder.n_values_", + 
"OneHotEncoder._handle_deprecations", + "OneHotEncoder.fit", + "OneHotEncoder._legacy_fit_transform", + "OneHotEncoder.fit_transform", + "OneHotEncoder._legacy_transform", + "OneHotEncoder._transform_new", + "OneHotEncoder.transform", + "OneHotEncoder.inverse_transform", + "OneHotEncoder.get_feature_names", + "OrdinalEncoder.fit", + "OrdinalEncoder.inverse_transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "docstring", + "imports", + "_BaseEncoder._check_X", + "_BaseEncoder._fit", + "_BaseEncoder._transform", + "OneHotEncoder.__init__", + "OneHotEncoder.active_features_", + "OneHotEncoder.feature_indices_", + "OneHotEncoder.n_values_", + "OneHotEncoder._handle_deprecations", + "OneHotEncoder.fit", + "OneHotEncoder._legacy_fit_transform", + "OneHotEncoder.fit_transform", + "OneHotEncoder._legacy_transform", + "OneHotEncoder._transform_new", + "OneHotEncoder.transform", + "OneHotEncoder.inverse_transform", + "OneHotEncoder.get_feature_names", + "OrdinalEncoder.fit", + "OrdinalEncoder.inverse_transform" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ], + "sklearn/preprocessing/label.py": [ + "_encode_numpy", + "_encode_python", + "_encode" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "OneHotEncoder._transform_new" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "OneHotEncoder._transform_new" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "OneHotEncoder._transform_new" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "OneHotEncoder._transform_new" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "OneHotEncoder._transform_new" + ] + }, 
+ "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "OneHotEncoder._transform_new" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ], + "sklearn/preprocessing/tests/test_encoders.py": [] + }, + "alternative_spans": { + "sklearn/preprocessing/_encoders.py": [ + "_BaseEncoder._transform" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240829_Isoform", + "spans": { + "sklearn/preprocessing/_encoders.py": [ + "OneHotEncoder._transform_new" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "sklearn/preprocessing/_encoders.py": [ + "OneHotEncoder._transform_new" + ] + } + }, + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "sklearn/preprocessing/_encoders.py": [ + "OneHotEncoder._transform_new" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-15609", + "repo": "sympy/sympy", + "base_commit": "15f56f3b0006d2ed2c29bde3c43e91618012c849", + "problem_statement": "Indexed matrix-expression LaTeX printer is not compilable\n```python\r\ni, j, k = symbols(\"i j k\")\r\nM = MatrixSymbol(\"M\", k, k)\r\nN = MatrixSymbol(\"N\", k, k)\r\nlatex((M*N)[i, j])\r\n```\r\n\r\nThe LaTeX string produced by the last command is:\r\n```\r\n\\sum_{i_{1}=0}^{k - 1} M_{i, _i_1} N_{_i_1, j}\r\n```\r\nLaTeX complains about a double subscript `_`. This expression won't render in MathJax either.\n", + "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -1438,7 +1438,10 @@ def _print_MatrixBase(self, expr):\n \n def _print_MatrixElement(self, expr):\n return self.parenthesize(expr.parent, PRECEDENCE[\"Atom\"], strict=True) \\\n- + '_{%s, %s}' % (expr.i, expr.j)\n+ + '_{%s, %s}' % (\n+ self._print(expr.i),\n+ self._print(expr.j)\n+ )\n \n def _print_MatrixSlice(self, expr):\n def latexslice(x):\n", + "test_patch": "diff --git a/sympy/printing/tests/test_latex.py b/sympy/printing/tests/test_latex.py\n--- a/sympy/printing/tests/test_latex.py\n+++ b/sympy/printing/tests/test_latex.py\n@@ -1738,6 +1738,11 @@ def test_MatrixElement_printing():\n F = C[0, 0].subs(C, A - B)\n assert latex(F) == r\"\\left(A - B\\right)_{0, 0}\"\n \n+ i, j, k = symbols(\"i j k\")\n+ M = MatrixSymbol(\"M\", k, k)\n+ N = MatrixSymbol(\"N\", k, k)\n+ assert latex((M*N)[i, j]) == r'\\sum_{i_{1}=0}^{k - 1} M_{i, i_{1}} N_{i_{1}, j}'\n+\n \n def test_MatrixSymbol_printing():\n # test cases for issue #14237\n", + "fail_to_pass": "[\"test_MatrixElement_printing\"]", + "pass_to_pass": "[\"test_printmethod\", \"test_latex_basic\", \"test_latex_builtins\", \"test_latex_SingularityFunction\", \"test_latex_cycle\", \"test_latex_permutation\", \"test_latex_Float\", \"test_latex_vector_expressions\", \"test_latex_symbols\", \"test_latex_functions\", \"test_function_subclass_different_name\", \"test_hyper_printing\", \"test_latex_bessel\", \"test_latex_fresnel\", \"test_latex_brackets\", \"test_latex_indexed\", \"test_latex_derivatives\", \"test_latex_subs\", \"test_latex_integrals\", \"test_latex_sets\", \"test_latex_SetExpr\", \"test_latex_Range\", \"test_latex_sequences\", \"test_latex_FourierSeries\", \"test_latex_FormalPowerSeries\", 
\"test_latex_intervals\", \"test_latex_AccumuBounds\", \"test_latex_emptyset\", \"test_latex_commutator\", \"test_latex_union\", \"test_latex_symmetric_difference\", \"test_latex_Complement\", \"test_latex_Complexes\", \"test_latex_productset\", \"test_latex_Naturals\", \"test_latex_Naturals0\", \"test_latex_Integers\", \"test_latex_ImageSet\", \"test_latex_ConditionSet\", \"test_latex_ComplexRegion\", \"test_latex_Contains\", \"test_latex_sum\", \"test_latex_product\", \"test_latex_limits\", \"test_latex_log\", \"test_issue_3568\", \"test_latex\", \"test_latex_dict\", \"test_latex_list\", \"test_latex_rational\", \"test_latex_inverse\", \"test_latex_DiracDelta\", \"test_latex_Heaviside\", \"test_latex_KroneckerDelta\", \"test_latex_LeviCivita\", \"test_mode\", \"test_latex_Piecewise\", \"test_latex_Matrix\", \"test_latex_matrix_with_functions\", \"test_latex_NDimArray\", \"test_latex_mul_symbol\", \"test_latex_issue_4381\", \"test_latex_issue_4576\", \"test_latex_pow_fraction\", \"test_noncommutative\", \"test_latex_order\", \"test_latex_Lambda\", \"test_latex_PolyElement\", \"test_latex_FracElement\", \"test_latex_Poly\", \"test_latex_Poly_order\", \"test_latex_ComplexRootOf\", \"test_latex_RootSum\", \"test_settings\", \"test_latex_numbers\", \"test_latex_euler\", \"test_lamda\", \"test_custom_symbol_names\", \"test_matAdd\", \"test_matMul\", \"test_latex_MatrixSlice\", \"test_latex_RandomDomain\", \"test_PrettyPoly\", \"test_integral_transforms\", \"test_PolynomialRingBase\", \"test_categories\", \"test_Modules\", \"test_QuotientRing\", \"test_Tr\", \"test_Adjoint\", \"test_Hadamard\", \"test_ZeroMatrix\", \"test_boolean_args_order\", \"test_imaginary\", \"test_builtins_without_args\", \"test_latex_greek_functions\", \"test_translate\", \"test_other_symbols\", \"test_modifiers\", \"test_greek_symbols\", \"test_builtin_no_args\", \"test_issue_6853\", \"test_Mul\", \"test_Pow\", \"test_issue_7180\", \"test_issue_8409\", \"test_issue_7117\", \"test_issue_15439\", \"test_issue_2934\", \"test_issue_10489\", \"test_issue_12886\", \"test_issue_13651\", \"test_latex_UnevaluatedExpr\", \"test_MatrixSymbol_printing\", \"test_Quaternion_latex_printing\", \"test_TensorProduct_printing\", \"test_WedgeProduct_printing\", \"test_issue_14041\", \"test_issue_9216\", \"test_latex_printer_tensor\", \"test_trace\"]", + "expected_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_latex.py": [ + "test_MatrixElement_printing" + ] + }, + "resolved_by": [ + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/printing/latex.py": [ + "docstring", + "imports", + "impl:5", + "impl:11", + "LatexPrinter", + "LatexPrinter.__init__", + "LatexPrinter.parenthesize", + 
"LatexPrinter.doprint", + "LatexPrinter._needs_brackets", + "LatexPrinter._needs_function_brackets", + "LatexPrinter._needs_mul_brackets", + "LatexPrinter._needs_add_brackets", + "LatexPrinter._do_exponent", + "LatexPrinter._print_Basic", + "LatexPrinter._print_bool", + "LatexPrinter:6", + "LatexPrinter._print_NoneType", + "LatexPrinter._print_Add", + "LatexPrinter._print_Cycle", + "LatexPrinter:10", + "LatexPrinter._print_Float", + "LatexPrinter._print_Cross", + "LatexPrinter._print_Curl", + "LatexPrinter._print_Divergence", + "LatexPrinter._print_Dot", + "LatexPrinter._print_Gradient", + "LatexPrinter._print_Mul", + "LatexPrinter._print_Pow", + "LatexPrinter._print_UnevaluatedExpr", + "LatexPrinter._print_Sum", + "LatexPrinter._print_Product", + "LatexPrinter._print_BasisDependent", + "LatexPrinter._print_Indexed", + "LatexPrinter._print_Derivative", + "LatexPrinter._print_Subs", + "LatexPrinter._print_Integral", + "LatexPrinter._print_Limit", + "LatexPrinter._hprint_Function", + "LatexPrinter._print_Function", + "LatexPrinter._special_function_classes", + "LatexPrinter._print_Lambda", + "LatexPrinter._hprint_variadic_function", + "LatexPrinter:12", + "LatexPrinter._print_floor", + "LatexPrinter._print_ceiling", + "LatexPrinter._print_log", + "LatexPrinter._print_Abs", + "LatexPrinter:15", + "LatexPrinter._print_re", + "LatexPrinter._print_im", + "LatexPrinter._print_Not", + "LatexPrinter._print_LogOp", + "LatexPrinter._print_And", + "LatexPrinter._print_Or", + "LatexPrinter._print_Xor", + "LatexPrinter._print_Implies", + "LatexPrinter._print_Equivalent", + "LatexPrinter._print_conjugate", + "LatexPrinter._print_polar_lift", + "LatexPrinter._print_ExpBase", + "LatexPrinter._print_elliptic_k", + "LatexPrinter._print_elliptic_f", + "LatexPrinter._print_elliptic_e", + "LatexPrinter._print_elliptic_pi", + "LatexPrinter._print_beta", + "LatexPrinter._print_uppergamma", + "LatexPrinter._print_lowergamma", + "LatexPrinter._hprint_one_arg_func", + "LatexPrinter:17", + "LatexPrinter._print_Chi", + "LatexPrinter._print_expint", + "LatexPrinter._print_fresnels", + "LatexPrinter._print_fresnelc", + "LatexPrinter._print_subfactorial", + "LatexPrinter._print_factorial", + "LatexPrinter._print_factorial2", + "LatexPrinter._print_binomial", + "LatexPrinter._print_RisingFactorial", + "LatexPrinter._print_FallingFactorial", + "LatexPrinter._hprint_BesselBase", + "LatexPrinter._hprint_vec", + "LatexPrinter._hprint_airy", + "LatexPrinter._hprint_airy_prime", + "LatexPrinter._print_hyper", + "LatexPrinter._print_meijerg", + "LatexPrinter._print_dirichlet_eta", + "LatexPrinter._print_zeta", + "LatexPrinter._print_lerchphi", + "LatexPrinter._print_polylog", + "LatexPrinter._print_jacobi", + "LatexPrinter._print_gegenbauer", + "LatexPrinter._print_chebyshevt", + "LatexPrinter._print_chebyshevu", + "LatexPrinter._print_legendre", + "LatexPrinter._print_assoc_legendre", + "LatexPrinter._print_hermite", + "LatexPrinter._print_laguerre", + "LatexPrinter._print_assoc_laguerre", + "LatexPrinter._print_Ynm", + "LatexPrinter._print_Znm", + "LatexPrinter._print_Rational", + "LatexPrinter._print_Order", + "LatexPrinter._print_Symbol", + "LatexPrinter:19", + "LatexPrinter._deal_with_super_sub", + "LatexPrinter._print_Relational", + "LatexPrinter._print_Piecewise", + "LatexPrinter._print_MatrixBase", + "LatexPrinter:23", + "LatexPrinter._print_MatrixElement", + "LatexPrinter._print_MatrixSlice", + "LatexPrinter._print_Transpose", + "LatexPrinter._print_Trace", + "LatexPrinter._print_Adjoint", + 
"LatexPrinter._print_MatMul", + "LatexPrinter._print_Mod", + "LatexPrinter._print_HadamardProduct", + "LatexPrinter._print_KroneckerProduct", + "LatexPrinter._print_MatPow", + "LatexPrinter._print_ZeroMatrix", + "LatexPrinter._print_Identity", + "LatexPrinter._print_NDimArray", + "LatexPrinter._printer_tensor_indices", + "LatexPrinter._print_TensMul", + "LatexPrinter._print_TensorIndex", + "LatexPrinter._print_tuple", + "LatexPrinter._print_TensorProduct", + "LatexPrinter._print_WedgeProduct", + "LatexPrinter._print_Tuple", + "LatexPrinter._print_list", + "LatexPrinter._print_dict", + "LatexPrinter._print_Dict", + "LatexPrinter._print_DiracDelta", + "LatexPrinter._print_SingularityFunction", + "LatexPrinter._print_Heaviside", + "LatexPrinter._print_KroneckerDelta", + "LatexPrinter._print_LeviCivita", + "LatexPrinter._print_ProductSet", + "LatexPrinter._print_RandomDomain", + "LatexPrinter._print_set", + "LatexPrinter:35", + "LatexPrinter._print_Range", + "LatexPrinter._print_SeqFormula", + "LatexPrinter:37", + "LatexPrinter._print_Interval", + "LatexPrinter._print_AccumulationBounds", + "LatexPrinter._print_Union", + "LatexPrinter._print_Complement", + "LatexPrinter._print_Intersection", + "LatexPrinter._print_SymmetricDifference", + "LatexPrinter._print_EmptySet", + "LatexPrinter._print_Naturals", + "LatexPrinter._print_Naturals0", + "LatexPrinter._print_Integers", + "LatexPrinter._print_Reals", + "LatexPrinter._print_Complexes", + "LatexPrinter._print_ImageSet", + "LatexPrinter._print_ConditionSet", + "LatexPrinter._print_ComplexRegion", + "LatexPrinter._print_Contains", + "LatexPrinter._print_FourierSeries", + "LatexPrinter._print_FormalPowerSeries", + "LatexPrinter._print_FiniteField", + "LatexPrinter._print_IntegerRing", + "LatexPrinter._print_RationalField", + "LatexPrinter._print_RealField", + "LatexPrinter._print_ComplexField", + "LatexPrinter._print_PolynomialRing", + "LatexPrinter._print_FractionField", + "LatexPrinter._print_PolynomialRingBase", + "LatexPrinter._print_Poly", + "LatexPrinter._print_ComplexRootOf", + "LatexPrinter._print_RootSum", + "LatexPrinter._print_PolyElement", + "LatexPrinter._print_FracElement", + "LatexPrinter._print_euler", + "LatexPrinter._print_catalan", + "LatexPrinter._print_MellinTransform", + "LatexPrinter._print_InverseMellinTransform", + "LatexPrinter._print_LaplaceTransform", + "LatexPrinter._print_InverseLaplaceTransform", + "LatexPrinter._print_FourierTransform", + "LatexPrinter._print_InverseFourierTransform", + "LatexPrinter._print_SineTransform", + "LatexPrinter._print_InverseSineTransform", + "LatexPrinter._print_CosineTransform", + "LatexPrinter._print_InverseCosineTransform", + "LatexPrinter._print_DMP", + "LatexPrinter._print_Morphism", + "LatexPrinter._print_NamedMorphism", + "LatexPrinter._print_IdentityMorphism", + "LatexPrinter._print_CompositeMorphism", + "LatexPrinter._print_Category", + "LatexPrinter._print_Diagram", + "LatexPrinter._print_DiagramGrid", + "LatexPrinter._print_FreeModule", + "LatexPrinter._print_FreeModuleElement", + "LatexPrinter._print_SubModule", + "LatexPrinter._print_ModuleImplementedIdeal", + "LatexPrinter._print_Quaternion", + "LatexPrinter._print_QuotientRing", + "LatexPrinter._print_QuotientRingElement", + "LatexPrinter._print_QuotientModuleElement", + "LatexPrinter._print_QuotientModule", + "LatexPrinter._print_MatrixHomomorphism", + "LatexPrinter._print_BaseScalarField", + "LatexPrinter._print_BaseVectorField", + "LatexPrinter._print_Differential", + "LatexPrinter._print_Tr", + 
"LatexPrinter._print_totient", + "LatexPrinter._print_reduced_totient", + "LatexPrinter._print_divisor_sigma", + "LatexPrinter._print_udivisor_sigma", + "LatexPrinter._print_primenu", + "LatexPrinter._print_primeomega", + "translate", + "latex" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "docstring", + "imports", + "impl:5", + "impl:11", + "LatexPrinter", + "LatexPrinter.__init__", + "LatexPrinter.parenthesize", + "LatexPrinter.doprint", + "LatexPrinter._needs_brackets", + "LatexPrinter._needs_function_brackets", + "LatexPrinter._needs_mul_brackets", + "LatexPrinter._needs_add_brackets", + "LatexPrinter._do_exponent", + "LatexPrinter._print_Basic", + "LatexPrinter._print_bool", + "LatexPrinter:6", + "LatexPrinter._print_NoneType", + "LatexPrinter._print_Add", + "LatexPrinter._print_Cycle", + "LatexPrinter:10", + "LatexPrinter._print_Float", + "LatexPrinter._print_Cross", + "LatexPrinter._print_Curl", + "LatexPrinter._print_Divergence", + "LatexPrinter._print_Dot", + "LatexPrinter._print_Gradient", + "LatexPrinter._print_Mul", + "LatexPrinter._print_Pow", + "LatexPrinter._print_UnevaluatedExpr", + "LatexPrinter._print_Sum", + "LatexPrinter._print_Product", + "LatexPrinter._print_BasisDependent", + "LatexPrinter._print_Indexed", + "LatexPrinter._print_Derivative", + "LatexPrinter._print_Subs", + "LatexPrinter._print_Integral", + "LatexPrinter._print_Limit", + "LatexPrinter._hprint_Function", + "LatexPrinter._print_Function", + "LatexPrinter._special_function_classes", + "LatexPrinter._print_Lambda", + "LatexPrinter._hprint_variadic_function", + "LatexPrinter:12", + "LatexPrinter._print_floor", + "LatexPrinter._print_ceiling", + "LatexPrinter._print_log", + "LatexPrinter._print_Abs", + "LatexPrinter:15", + "LatexPrinter._print_re", + "LatexPrinter._print_im", + "LatexPrinter._print_Not", + "LatexPrinter._print_LogOp", + "LatexPrinter._print_And", + "LatexPrinter._print_Or", + "LatexPrinter._print_Xor", + "LatexPrinter._print_Implies", + "LatexPrinter._print_Equivalent", + "LatexPrinter._print_conjugate", + "LatexPrinter._print_polar_lift", + "LatexPrinter._print_ExpBase", + "LatexPrinter._print_elliptic_k", + "LatexPrinter._print_elliptic_f", + "LatexPrinter._print_elliptic_e", + "LatexPrinter._print_elliptic_pi", + "LatexPrinter._print_beta", + "LatexPrinter._print_uppergamma", + "LatexPrinter._print_lowergamma", + "LatexPrinter._hprint_one_arg_func", + "LatexPrinter:17", + "LatexPrinter._print_Chi", + "LatexPrinter._print_expint", + "LatexPrinter._print_fresnels", + "LatexPrinter._print_fresnelc", + "LatexPrinter._print_subfactorial", + "LatexPrinter._print_factorial", + "LatexPrinter._print_factorial2", + "LatexPrinter._print_binomial", + "LatexPrinter._print_RisingFactorial", + "LatexPrinter._print_FallingFactorial", + "LatexPrinter._hprint_BesselBase", + "LatexPrinter._hprint_vec", + "LatexPrinter._hprint_airy", + "LatexPrinter._hprint_airy_prime", + "LatexPrinter._print_hyper", + "LatexPrinter._print_meijerg", + "LatexPrinter._print_dirichlet_eta", + "LatexPrinter._print_zeta", + "LatexPrinter._print_lerchphi", + "LatexPrinter._print_polylog", + "LatexPrinter._print_jacobi", + "LatexPrinter._print_gegenbauer", + "LatexPrinter._print_chebyshevt", + "LatexPrinter._print_chebyshevu", + "LatexPrinter._print_legendre", + "LatexPrinter._print_assoc_legendre", + "LatexPrinter._print_hermite", + "LatexPrinter._print_laguerre", + "LatexPrinter._print_assoc_laguerre", + "LatexPrinter._print_Ynm", + "LatexPrinter._print_Znm", + "LatexPrinter._print_Rational", + 
"LatexPrinter._print_Order", + "LatexPrinter._print_Symbol", + "LatexPrinter:19", + "LatexPrinter._deal_with_super_sub", + "LatexPrinter._print_Relational", + "LatexPrinter._print_Piecewise", + "LatexPrinter._print_MatrixBase", + "LatexPrinter:23", + "LatexPrinter._print_MatrixElement", + "LatexPrinter._print_MatrixSlice", + "LatexPrinter._print_Transpose", + "LatexPrinter._print_Trace", + "LatexPrinter._print_Adjoint", + "LatexPrinter._print_MatMul", + "LatexPrinter._print_Mod", + "LatexPrinter._print_HadamardProduct", + "LatexPrinter._print_KroneckerProduct", + "LatexPrinter._print_MatPow", + "LatexPrinter._print_ZeroMatrix", + "LatexPrinter._print_Identity", + "LatexPrinter._print_NDimArray", + "LatexPrinter._printer_tensor_indices", + "LatexPrinter._print_TensMul", + "LatexPrinter._print_TensorIndex", + "LatexPrinter._print_tuple", + "LatexPrinter._print_TensorProduct", + "LatexPrinter._print_WedgeProduct", + "LatexPrinter._print_Tuple", + "LatexPrinter._print_list", + "LatexPrinter._print_dict", + "LatexPrinter._print_Dict", + "LatexPrinter._print_DiracDelta", + "LatexPrinter._print_SingularityFunction", + "LatexPrinter._print_Heaviside", + "LatexPrinter._print_KroneckerDelta", + "LatexPrinter._print_LeviCivita", + "LatexPrinter._print_ProductSet", + "LatexPrinter._print_RandomDomain", + "LatexPrinter._print_set", + "LatexPrinter:35", + "LatexPrinter._print_Range", + "LatexPrinter._print_SeqFormula", + "LatexPrinter:37", + "LatexPrinter._print_Interval", + "LatexPrinter._print_AccumulationBounds", + "LatexPrinter._print_Union", + "LatexPrinter._print_Complement", + "LatexPrinter._print_Intersection", + "LatexPrinter._print_SymmetricDifference", + "LatexPrinter._print_EmptySet", + "LatexPrinter._print_Naturals", + "LatexPrinter._print_Naturals0", + "LatexPrinter._print_Integers", + "LatexPrinter._print_Reals", + "LatexPrinter._print_Complexes", + "LatexPrinter._print_ImageSet", + "LatexPrinter._print_ConditionSet", + "LatexPrinter._print_ComplexRegion", + "LatexPrinter._print_Contains", + "LatexPrinter._print_FourierSeries", + "LatexPrinter._print_FormalPowerSeries", + "LatexPrinter._print_FiniteField", + "LatexPrinter._print_IntegerRing", + "LatexPrinter._print_RationalField", + "LatexPrinter._print_RealField", + "LatexPrinter._print_ComplexField", + "LatexPrinter._print_PolynomialRing", + "LatexPrinter._print_FractionField", + "LatexPrinter._print_PolynomialRingBase", + "LatexPrinter._print_Poly", + "LatexPrinter._print_ComplexRootOf", + "LatexPrinter._print_RootSum", + "LatexPrinter._print_PolyElement", + "LatexPrinter._print_FracElement", + "LatexPrinter._print_euler", + "LatexPrinter._print_catalan", + "LatexPrinter._print_MellinTransform", + "LatexPrinter._print_InverseMellinTransform", + "LatexPrinter._print_LaplaceTransform", + "LatexPrinter._print_InverseLaplaceTransform", + "LatexPrinter._print_FourierTransform", + "LatexPrinter._print_InverseFourierTransform", + "LatexPrinter._print_SineTransform", + "LatexPrinter._print_InverseSineTransform", + "LatexPrinter._print_CosineTransform", + "LatexPrinter._print_InverseCosineTransform", + "LatexPrinter._print_DMP", + "LatexPrinter._print_Morphism", + "LatexPrinter._print_NamedMorphism", + "LatexPrinter._print_IdentityMorphism", + "LatexPrinter._print_CompositeMorphism", + "LatexPrinter._print_Category", + "LatexPrinter._print_Diagram", + "LatexPrinter._print_DiagramGrid", + "LatexPrinter._print_FreeModule", + "LatexPrinter._print_FreeModuleElement", + "LatexPrinter._print_SubModule", + "LatexPrinter._print_ModuleImplementedIdeal", 
+ "LatexPrinter._print_Quaternion", + "LatexPrinter._print_QuotientRing", + "LatexPrinter._print_QuotientRingElement", + "LatexPrinter._print_QuotientModuleElement", + "LatexPrinter._print_QuotientModule", + "LatexPrinter._print_MatrixHomomorphism", + "LatexPrinter._print_BaseScalarField", + "LatexPrinter._print_BaseVectorField", + "LatexPrinter._print_Differential", + "LatexPrinter._print_Tr", + "LatexPrinter._print_totient", + "LatexPrinter._print_reduced_totient", + "LatexPrinter._print_divisor_sigma", + "LatexPrinter._print_udivisor_sigma", + "LatexPrinter._print_primenu", + "LatexPrinter._print_primeomega", + "translate", + "latex" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + }, + "alternative_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_MatrixElement" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-15678", + "repo": "sympy/sympy", + "base_commit": "31c68eef3ffef39e2e792b0ec92cd92b7010eb2a", + "problem_statement": "Some issues with idiff\nidiff doesn't support Eq, and it also doesn't support f(x) instead of y. 
Both should be easy to correct.\r\n\r\n```\r\n>>> idiff(Eq(y*exp(y), x*exp(x)), y, x)\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"./sympy/geometry/util.py\", line 582, in idiff\r\n yp = solve(eq.diff(x), dydx)[0].subs(derivs)\r\nIndexError: list index out of range\r\n>>> idiff(f(x)*exp(f(x)) - x*exp(x), f(x), x)\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"./sympy/geometry/util.py\", line 574, in idiff\r\n raise ValueError(\"expecting x-dependent symbol(s) but got: %s\" % y)\r\nValueError: expecting x-dependent symbol(s) but got: f(x)\r\n>>> idiff(y*exp(y)- x*exp(x), y, x)\r\n(x + 1)*exp(x - y)/(y + 1)\r\n```\n", + "golden_patch": "diff --git a/sympy/geometry/util.py b/sympy/geometry/util.py\n--- a/sympy/geometry/util.py\n+++ b/sympy/geometry/util.py\n@@ -570,12 +570,19 @@ def idiff(eq, y, x, n=1):\n y = y[0]\n elif isinstance(y, Symbol):\n dep = {y}\n+ elif isinstance(y, Function):\n+ pass\n else:\n- raise ValueError(\"expecting x-dependent symbol(s) but got: %s\" % y)\n+ raise ValueError(\"expecting x-dependent symbol(s) or function(s) but got: %s\" % y)\n \n f = dict([(s, Function(\n s.name)(x)) for s in eq.free_symbols if s != x and s in dep])\n- dydx = Function(y.name)(x).diff(x)\n+\n+ if isinstance(y, Symbol):\n+ dydx = Function(y.name)(x).diff(x)\n+ else:\n+ dydx = y.diff(x)\n+\n eq = eq.subs(f)\n derivs = {}\n for i in range(n):\n", + "test_patch": "diff --git a/sympy/geometry/tests/test_util.py b/sympy/geometry/tests/test_util.py\n--- a/sympy/geometry/tests/test_util.py\n+++ b/sympy/geometry/tests/test_util.py\n@@ -1,5 +1,5 @@\n-from sympy import Symbol, sqrt, Derivative, S\n-from sympy.geometry import Point, Point2D, Line, Circle ,Polygon, Segment, convex_hull, intersection, centroid\n+from sympy import Symbol, sqrt, Derivative, S, Function, exp\n+from sympy.geometry import Point, Point2D, Line, Circle, Polygon, Segment, convex_hull, intersection, centroid\n from sympy.geometry.util import idiff, closest_points, farthest_points, _ordered_points\n from sympy.solvers.solvers import solve\n from sympy.utilities.pytest import raises\n@@ -9,6 +9,8 @@ def test_idiff():\n x = Symbol('x', real=True)\n y = Symbol('y', real=True)\n t = Symbol('t', real=True)\n+ f = Function('f')\n+ g = Function('g')\n # the use of idiff in ellipse also provides coverage\n circ = x**2 + y**2 - 4\n ans = -3*x*(x**2 + y**2)/y**5\n@@ -19,6 +21,10 @@ def test_idiff():\n assert ans.subs(y, solve(circ, y)[0]).equals(explicit)\n assert True in [sol.diff(x, 3).equals(explicit) for sol in solve(circ, y)]\n assert idiff(x + t + y, [y, t], x) == -Derivative(t, x) - 1\n+ assert idiff(f(x) * exp(f(x)) - x * exp(x), f(x), x) == (x + 1) * exp(x - f(x))/(f(x) + 1)\n+ assert idiff(f(x) - y * exp(x), [f(x), y], x) == (y + Derivative(y, x)) * exp(x)\n+ assert idiff(f(x) - y * exp(x), [y, f(x)], x) == -y + exp(-x) * Derivative(f(x), x)\n+ assert idiff(f(x) - g(x), [f(x), g(x)], x) == Derivative(g(x), x)\n \n \n def test_intersection():\n", + "fail_to_pass": "[\"test_idiff\"]", + "pass_to_pass": "[\"test_intersection\", \"test_convex_hull\", \"test_centroid\"]", + "expected_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + }, + "test_file_spans": { + "sympy/geometry/tests/test_util.py": [ + "imports", + "test_idiff" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sympy/geometry/util.py": [ + "imports", + "are_coplanar", + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "imports", + "are_coplanar", + 
"idiff" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/core/symbol.py": [ + "Symbol._diff_wrt" + ], + "sympy/geometry/util.py": [ + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/geometry/util.py": [ + "imports", + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "imports", + "idiff" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/geometry/util.py": [ + "imports", + "find", + "are_coplanar", + "are_similar", + "centroid", + "closest_points", + "convex_hull", + "farthest_points", + "idiff", + "intersection" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "imports", + "find", + "are_coplanar", + "are_similar", + "centroid", + "closest_points", + "convex_hull", + "farthest_points", + "idiff", + "intersection" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/geometry/tests/test_util.py": [ + "imports", + "test_idiff" + ], + "sympy/geometry/util.py": [ + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sympy/geometry/util.py": [ + "docstring", + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "docstring", + "idiff" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/geometry/util.py": [ + "imports", + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "imports", + "idiff" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/geometry/util.py": [ + "imports", + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "imports", + "idiff" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + }, + "alternative_spans": { + "sympy/geometry/util.py": [ + "idiff" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-10914", + "repo": "django/django", + "base_commit": "e7fd69d051eaa67cb17f172a39b57253e9cb831a", + "problem_statement": "Set default FILE_UPLOAD_PERMISSION to 0o644.\nDescription\n\t\nHello,\nAs far as I can see, the \u200bFile Uploads documentation page does not mention any permission issues.\nWhat I would like to see is a warning that in absence of explicitly configured FILE_UPLOAD_PERMISSIONS, the permissions for a file uploaded to FileSystemStorage might not be consistent depending on whether a MemoryUploadedFile or a TemporaryUploadedFile was used for temporary storage of the uploaded data (which, with the default FILE_UPLOAD_HANDLERS, in turn depends on the uploaded data size).\nThe tempfile.NamedTemporaryFile + os.rename sequence causes the resulting file permissions to be 0o0600 on some 
systems (I experience it here on CentOS 7.4.1708 and Python 3.6.5). In all probability, the implementation of Python's built-in tempfile module explicitly sets such permissions for temporary files due to security considerations.\nI found mentions of this issue \u200bon GitHub, but did not manage to find any existing bug report in Django's bug tracker.\n", + "golden_patch": "diff --git a/django/conf/global_settings.py b/django/conf/global_settings.py\n--- a/django/conf/global_settings.py\n+++ b/django/conf/global_settings.py\n@@ -304,7 +304,7 @@ def gettext_noop(s):\n \n # The numeric mode to set newly-uploaded files to. The value should be a mode\n # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories.\n-FILE_UPLOAD_PERMISSIONS = None\n+FILE_UPLOAD_PERMISSIONS = 0o644\n \n # The numeric mode to assign to newly-created directories, when uploading files.\n # The value should be a mode as you'd pass to os.chmod;\n", + "test_patch": "diff --git a/tests/test_utils/tests.py b/tests/test_utils/tests.py\n--- a/tests/test_utils/tests.py\n+++ b/tests/test_utils/tests.py\n@@ -1099,7 +1099,7 @@ def test_override_file_upload_permissions(self):\n the file_permissions_mode attribute of\n django.core.files.storage.default_storage.\n \"\"\"\n- self.assertIsNone(default_storage.file_permissions_mode)\n+ self.assertEqual(default_storage.file_permissions_mode, 0o644)\n with self.settings(FILE_UPLOAD_PERMISSIONS=0o777):\n self.assertEqual(default_storage.file_permissions_mode, 0o777)\n \n", + "fail_to_pass": "[\"test_override_file_upload_permissions (test_utils.tests.OverrideSettingsTests)\"]", + "pass_to_pass": "[\"test_allowed_database_chunked_cursor_queries (test_utils.tests.AllowedDatabaseQueriesTests)\", \"test_allowed_database_queries (test_utils.tests.AllowedDatabaseQueriesTests)\", \"test_skip_if_db_feature (test_utils.tests.SkippingTestCase)\", \"test_skip_unless_db_feature (test_utils.tests.SkippingTestCase)\", \"test_equal_parsing_errors (test_utils.tests.JSONEqualTests)\", \"test_not_equal_parsing_errors (test_utils.tests.JSONEqualTests)\", \"test_simple_equal (test_utils.tests.JSONEqualTests)\", \"test_simple_equal_raise (test_utils.tests.JSONEqualTests)\", \"test_simple_equal_unordered (test_utils.tests.JSONEqualTests)\", \"test_simple_not_equal (test_utils.tests.JSONEqualTests)\", \"test_simple_not_equal_raise (test_utils.tests.JSONEqualTests)\", \"test_assert_raises_message (test_utils.tests.AssertRaisesMsgTest)\", \"assertRaisesMessage shouldn't interpret RE special chars.\", \"test_failure_in_setUpTestData_should_rollback_transaction (test_utils.tests.TestBadSetUpTestData)\", \"test_all (test_utils.tests.DatabaseAliasTests)\", \"test_close_match (test_utils.tests.DatabaseAliasTests)\", \"test_match (test_utils.tests.DatabaseAliasTests)\", \"test_no_close_match (test_utils.tests.DatabaseAliasTests)\", \"test_missing_default_databases (test_utils.tests.SkippingClassTestCase)\", \"test_skip_class_unless_db_feature (test_utils.tests.SkippingClassTestCase)\", \"test_ordered (test_utils.tests.AssertQuerysetEqualTests)\", \"test_repeated_values (test_utils.tests.AssertQuerysetEqualTests)\", \"test_transform (test_utils.tests.AssertQuerysetEqualTests)\", \"test_undefined_order (test_utils.tests.AssertQuerysetEqualTests)\", \"test_unordered (test_utils.tests.AssertQuerysetEqualTests)\", \"test_disallowed_database_chunked_cursor_queries (test_utils.tests.DisallowedDatabaseQueriesTests)\", \"test_disallowed_database_connections 
(test_utils.tests.DisallowedDatabaseQueriesTests)\", \"test_disallowed_database_queries (test_utils.tests.DisallowedDatabaseQueriesTests)\", \"test_equal (test_utils.tests.AssertURLEqualTests)\", \"test_message (test_utils.tests.AssertURLEqualTests)\", \"test_msg_prefix (test_utils.tests.AssertURLEqualTests)\", \"test_not_equal (test_utils.tests.AssertURLEqualTests)\", \"test_allowed_hosts (test_utils.tests.SetupTestEnvironmentTests)\", \"test_setup_test_environment_calling_more_than_once (test_utils.tests.SetupTestEnvironmentTests)\", \"An exception is setUp() is reraised after disable() is called.\", \"test_callable (test_utils.tests.AssertWarnsMessageTests)\", \"test_context_manager (test_utils.tests.AssertWarnsMessageTests)\", \"test_context_manager_failure (test_utils.tests.AssertWarnsMessageTests)\", \"test_special_re_chars (test_utils.tests.AssertWarnsMessageTests)\", \"test_comment_root (test_utils.tests.XMLEqualTests)\", \"test_parsing_errors (test_utils.tests.XMLEqualTests)\", \"test_simple_equal (test_utils.tests.XMLEqualTests)\", \"test_simple_equal_raise (test_utils.tests.XMLEqualTests)\", \"test_simple_equal_raises_message (test_utils.tests.XMLEqualTests)\", \"test_simple_equal_unordered (test_utils.tests.XMLEqualTests)\", \"test_simple_equal_with_leading_or_trailing_whitespace (test_utils.tests.XMLEqualTests)\", \"test_simple_not_equal (test_utils.tests.XMLEqualTests)\", \"test_simple_not_equal_raise (test_utils.tests.XMLEqualTests)\", \"test_simple_not_equal_with_whitespace_in_the_middle (test_utils.tests.XMLEqualTests)\", \"test_attributes (test_utils.tests.HTMLEqualTests)\", \"test_complex_examples (test_utils.tests.HTMLEqualTests)\", \"test_contains_html (test_utils.tests.HTMLEqualTests)\", \"test_count (test_utils.tests.HTMLEqualTests)\", \"test_html_contain (test_utils.tests.HTMLEqualTests)\", \"test_html_parser (test_utils.tests.HTMLEqualTests)\", \"test_ignore_comments (test_utils.tests.HTMLEqualTests)\", \"test_parse_html_in_script (test_utils.tests.HTMLEqualTests)\", \"test_parsing_errors (test_utils.tests.HTMLEqualTests)\", \"test_self_closing_tags (test_utils.tests.HTMLEqualTests)\", \"test_simple_equal_html (test_utils.tests.HTMLEqualTests)\", \"test_unequal_html (test_utils.tests.HTMLEqualTests)\", \"test_unicode_handling (test_utils.tests.HTMLEqualTests)\", \"test_assert_field_output (test_utils.tests.AssertFieldOutputTests)\", \"test_custom_required_message (test_utils.tests.AssertFieldOutputTests)\", \"test_class_decoration (test_utils.tests.IsolatedAppsTests)\", \"test_context_manager (test_utils.tests.IsolatedAppsTests)\", \"test_installed_apps (test_utils.tests.IsolatedAppsTests)\", \"test_method_decoration (test_utils.tests.IsolatedAppsTests)\", \"test_nested (test_utils.tests.IsolatedAppsTests)\", \"test_ignores_connection_configuration_queries (test_utils.tests.AssertNumQueriesUponConnectionTests)\", \"test_override_database_routers (test_utils.tests.OverrideSettingsTests)\", \"test_override_file_upload_directory_permissions (test_utils.tests.OverrideSettingsTests)\", \"test_override_media_root (test_utils.tests.OverrideSettingsTests)\", \"test_override_media_url (test_utils.tests.OverrideSettingsTests)\", \"test_override_static_root (test_utils.tests.OverrideSettingsTests)\", \"test_override_static_url (test_utils.tests.OverrideSettingsTests)\", \"test_override_staticfiles_dirs (test_utils.tests.OverrideSettingsTests)\", \"test_override_staticfiles_finders (test_utils.tests.OverrideSettingsTests)\", \"test_override_staticfiles_storage 
(test_utils.tests.OverrideSettingsTests)\", \"test_urlconf_cache (test_utils.tests.OverrideSettingsTests)\", \"test_urlconf_first (test_utils.tests.OverrideSettingsTests)\", \"test_urlconf_second (test_utils.tests.OverrideSettingsTests)\", \"test_failure (test_utils.tests.AssertNumQueriesContextManagerTests)\", \"test_simple (test_utils.tests.AssertNumQueriesContextManagerTests)\", \"test_with_client (test_utils.tests.AssertNumQueriesContextManagerTests)\", \"test_assert_used_on_http_response (test_utils.tests.AssertTemplateUsedContextManagerTests)\", \"test_error_message (test_utils.tests.AssertTemplateUsedContextManagerTests)\", \"test_failure (test_utils.tests.AssertTemplateUsedContextManagerTests)\", \"test_nested_usage (test_utils.tests.AssertTemplateUsedContextManagerTests)\", \"test_not_used (test_utils.tests.AssertTemplateUsedContextManagerTests)\", \"test_usage (test_utils.tests.AssertTemplateUsedContextManagerTests)\", \"test_failure (test_utils.tests.CaptureQueriesContextManagerTests)\", \"test_nested (test_utils.tests.CaptureQueriesContextManagerTests)\", \"test_simple (test_utils.tests.CaptureQueriesContextManagerTests)\", \"test_with_client (test_utils.tests.CaptureQueriesContextManagerTests)\", \"test_within (test_utils.tests.CaptureQueriesContextManagerTests)\", \"test_assert_num_queries (test_utils.tests.AssertNumQueriesTests)\", \"test_assert_num_queries_with_client (test_utils.tests.AssertNumQueriesTests)\"]", + "expected_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "test_file_spans": { + "tests/test_utils/tests.py": [ + "OverrideSettingsTests.test_override_file_upload_permissions" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ], + "django/core/files/storage.py": [ + "FileSystemStorage._save" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20231010_rag_claude2", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ], + "tests/file_uploads/tests.py": [ + "imports" + ], + "tests/staticfiles_tests/settings.py": [ + "imports" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ], + "django/core/files/storage.py": [ + "FileSystemStorage.directory_permissions_mode" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ], + "django/core/files/storage.py": [ + "FileSystemStorage.file_permissions_mode" + ] + }, + "alternative_spans": { + 
"django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ], + "django/core/files/storage.py": [ + "FileSystemStorage._save" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/core/files/storage.py": [ + "FileSystemStorage.file_permissions_mode", + "FileSystemStorage._save" + ] + }, + "alternative_spans": {} + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ], + "django/core/files/storage.py": [ + "FileSystemStorage._save" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + 
"alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ], + "django/core/files/move.py": [ + "file_move_safe" + ], + "django/core/files/storage.py": [ + "FileSystemStorage._save" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ], + "django/core/files/storage.py": [ + "FileSystemStorage.file_permissions_mode" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240402_rag_claude3opus", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + }, + "alternative_spans": { + "django/conf/global_settings.py": [ + "impl:105" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-10924", + "repo": "django/django", + "base_commit": "bceadd2788dc2dad53eba0caae172bd8522fd483", + "problem_statement": "Allow FilePathField path to accept a callable.\nDescription\n\t\nI have a special case where I want to create a model containing the path to some local files on the server/dev machine. Seeing as the place where these files are stored is different on different machines I have the following:\nimport os\nfrom django.conf import settings\nfrom django.db import models\nclass LocalFiles(models.Model):\n\tname = models.CharField(max_length=255)\n\tfile = models.FilePathField(path=os.path.join(settings.LOCAL_FILE_DIR, 'example_dir'))\nNow when running manage.py makemigrations it will resolve the path based on the machine it is being run on. 
Eg: /home//server_files/example_dir\nI had to manually change the migration to include the os.path.join() part to not break this when running the migration on production/other machine.\n", + "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -1709,7 +1709,7 @@ def get_prep_value(self, value):\n \n def formfield(self, **kwargs):\n return super().formfield(**{\n- 'path': self.path,\n+ 'path': self.path() if callable(self.path) else self.path,\n 'match': self.match,\n 'recursive': self.recursive,\n 'form_class': forms.FilePathField,\n", + "test_patch": "diff --git a/tests/model_fields/test_filepathfield.py b/tests/model_fields/test_filepathfield.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/model_fields/test_filepathfield.py\n@@ -0,0 +1,22 @@\n+import os\n+\n+from django.db.models import FilePathField\n+from django.test import SimpleTestCase\n+\n+\n+class FilePathFieldTests(SimpleTestCase):\n+ def test_path(self):\n+ path = os.path.dirname(__file__)\n+ field = FilePathField(path=path)\n+ self.assertEqual(field.path, path)\n+ self.assertEqual(field.formfield().path, path)\n+\n+ def test_callable_path(self):\n+ path = os.path.dirname(__file__)\n+\n+ def generate_path():\n+ return path\n+\n+ field = FilePathField(path=generate_path)\n+ self.assertEqual(field.path(), path)\n+ self.assertEqual(field.formfield().path, path)\n", + "fail_to_pass": "[\"test_callable_path (model_fields.test_filepathfield.FilePathFieldTests)\"]", + "pass_to_pass": "[\"test_path (model_fields.test_filepathfield.FilePathFieldTests)\"]", + "expected_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.formfield" + ] + }, + "test_file_spans": {}, + "resolved_by": [ + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.check", + "FilePathField.formfield", + "FilePathField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.check", + "FilePathField.formfield", + "FilePathField" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.formfield" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.formfield" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.__init__", + "FilePathField.check", + "FilePathField._check_allowing_files_or_folders", + "FilePathField.deconstruct", + "FilePathField.formfield" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.__init__", + "FilePathField.check", + "FilePathField._check_allowing_files_or_folders", + "FilePathField.deconstruct", + "FilePathField.formfield" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.__init__", + "FilePathField.get_prep_value", + "FilePathField.formfield" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.__init__", + "FilePathField.get_prep_value", + "FilePathField.formfield" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "FilePathField.__init__", + "FilePathField.formfield" + ] + }, + "alternative_spans": { + 
"django/db/models/fields/__init__.py": [ + "FilePathField.__init__", + "FilePathField.formfield" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-13142", + "repo": "scikit-learn/scikit-learn", + "base_commit": "1c8668b0a021832386470ddf740d834e02c66f69", + "problem_statement": "GaussianMixture predict and fit_predict disagree when n_init>1\n#### Description\r\nWhen `n_init` is specified in GaussianMixture, the results of fit_predict(X) and predict(X) are often different. The `test_gaussian_mixture_fit_predict` unit test doesn't catch this because it does not set `n_init`.\r\n\r\n#### Steps/Code to Reproduce\r\n```\r\npython\r\nfrom sklearn.mixture import GaussianMixture\r\nfrom sklearn.utils.testing import assert_array_equal\r\nimport numpy\r\nX = numpy.random.randn(1000,5)\r\nprint 'no n_init'\r\ngm = GaussianMixture(n_components=5)\r\nc1 = gm.fit_predict(X)\r\nc2 = gm.predict(X)\r\nassert_array_equal(c1,c2)\r\nprint 'n_init=5'\r\ngm = GaussianMixture(n_components=5, n_init=5)\r\nc1 = gm.fit_predict(X)\r\nc2 = gm.predict(X)\r\nassert_array_equal(c1,c2)\r\n```\r\n\r\n#### Expected Results\r\n```\r\nno n_init\r\nn_init=5\r\n```\r\nNo exceptions.\r\n\r\n#### Actual Results\r\n```\r\nno n_init\r\nn_init=5\r\nTraceback (most recent call last):\r\n File \"test_gm.py\", line 17, in \r\n assert_array_equal(c1,c2)\r\n File \"/home/scott/.local/lib/python2.7/site-packages/numpy/testing/_private/utils.py\", line 872, in assert_array_equal\r\n verbose=verbose, header='Arrays are not equal')\r\n File \"/home/scott/.local/lib/python2.7/site-packages/numpy/testing/_private/utils.py\", line 796, in assert_array_compare\r\n raise AssertionError(msg)\r\nAssertionError: \r\nArrays are not equal\r\n\r\n(mismatch 88.6%)\r\n x: array([4, 0, 1, 1, 1, 3, 3, 4, 4, 2, 0, 0, 1, 2, 0, 2, 0, 1, 3, 1, 1, 3,\r\n 2, 1, 0, 2, 1, 0, 2, 0, 3, 1, 2, 3, 3, 1, 0, 2, 2, 0, 3, 0, 2, 0,\r\n 4, 2, 3, 0, 4, 2, 4, 1, 0, 2, 2, 1, 3, 2, 1, 4, 0, 2, 2, 1, 1, 2,...\r\n y: array([4, 1, 0, 2, 2, 1, 1, 4, 4, 0, 4, 1, 0, 3, 1, 0, 2, 2, 1, 2, 0, 0,\r\n 1, 0, 4, 1, 0, 4, 0, 1, 1, 2, 3, 1, 4, 0, 1, 4, 4, 4, 0, 1, 0, 2,\r\n 4, 1, 1, 2, 4, 3, 4, 0, 2, 3, 2, 3, 0, 0, 2, 3, 3, 3, 3, 0, 3, 2,...\r\n```\r\n\r\n#### Versions\r\n```\r\nSystem:\r\n python: 2.7.15rc1 (default, Nov 12 2018, 14:31:15) [GCC 7.3.0]\r\n machine: Linux-4.15.0-43-generic-x86_64-with-Ubuntu-18.04-bionic\r\nexecutable: /usr/bin/python\r\n\r\nBLAS:\r\n macros: HAVE_CBLAS=None, NO_ATLAS_INFO=-1\r\ncblas_libs: cblas\r\n lib_dirs: /usr/lib/x86_64-linux-gnu\r\n\r\nPython deps:\r\n Cython: 0.28.5\r\n scipy: 1.2.0\r\nsetuptools: 39.0.1\r\n pip: 19.0.1\r\n numpy: 1.16.0\r\n pandas: 0.23.1\r\n sklearn: 0.20.2\r\n```\n", + "golden_patch": "diff --git a/sklearn/mixture/base.py b/sklearn/mixture/base.py\n--- a/sklearn/mixture/base.py\n+++ b/sklearn/mixture/base.py\n@@ -257,11 +257,6 @@ def fit_predict(self, X, y=None):\n best_params = self._get_parameters()\n best_n_iter = n_iter\n \n- # Always do a final e-step to guarantee that the labels returned by\n- # fit_predict(X) are always consistent with fit(X).predict(X)\n- # for any value of max_iter and tol (and any random_state).\n- _, log_resp = self._e_step(X)\n-\n if not self.converged_:\n warnings.warn('Initialization %d did not converge. 
'\n 'Try different init parameters, '\n@@ -273,6 +268,11 @@ def fit_predict(self, X, y=None):\n self.n_iter_ = best_n_iter\n self.lower_bound_ = max_lower_bound\n \n+ # Always do a final e-step to guarantee that the labels returned by\n+ # fit_predict(X) are always consistent with fit(X).predict(X)\n+ # for any value of max_iter and tol (and any random_state).\n+ _, log_resp = self._e_step(X)\n+\n return log_resp.argmax(axis=1)\n \n def _e_step(self, X):\n", + "test_patch": "diff --git a/sklearn/mixture/tests/test_bayesian_mixture.py b/sklearn/mixture/tests/test_bayesian_mixture.py\n--- a/sklearn/mixture/tests/test_bayesian_mixture.py\n+++ b/sklearn/mixture/tests/test_bayesian_mixture.py\n@@ -451,6 +451,15 @@ def test_bayesian_mixture_fit_predict(seed, max_iter, tol):\n assert_array_equal(Y_pred1, Y_pred2)\n \n \n+def test_bayesian_mixture_fit_predict_n_init():\n+ # Check that fit_predict is equivalent to fit.predict, when n_init > 1\n+ X = np.random.RandomState(0).randn(1000, 5)\n+ gm = BayesianGaussianMixture(n_components=5, n_init=10, random_state=0)\n+ y_pred1 = gm.fit_predict(X)\n+ y_pred2 = gm.predict(X)\n+ assert_array_equal(y_pred1, y_pred2)\n+\n+\n def test_bayesian_mixture_predict_predict_proba():\n # this is the same test as test_gaussian_mixture_predict_predict_proba()\n rng = np.random.RandomState(0)\ndiff --git a/sklearn/mixture/tests/test_gaussian_mixture.py b/sklearn/mixture/tests/test_gaussian_mixture.py\n--- a/sklearn/mixture/tests/test_gaussian_mixture.py\n+++ b/sklearn/mixture/tests/test_gaussian_mixture.py\n@@ -598,6 +598,15 @@ def test_gaussian_mixture_fit_predict(seed, max_iter, tol):\n assert_greater(adjusted_rand_score(Y, Y_pred2), .95)\n \n \n+def test_gaussian_mixture_fit_predict_n_init():\n+ # Check that fit_predict is equivalent to fit.predict, when n_init > 1\n+ X = np.random.RandomState(0).randn(1000, 5)\n+ gm = GaussianMixture(n_components=5, n_init=5, random_state=0)\n+ y_pred1 = gm.fit_predict(X)\n+ y_pred2 = gm.predict(X)\n+ assert_array_equal(y_pred1, y_pred2)\n+\n+\n def test_gaussian_mixture_fit():\n # recover the ground truth\n rng = np.random.RandomState(0)\n", + "fail_to_pass": "[\"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_fit_predict_n_init\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_predict_n_init\"]", + "pass_to_pass": "[\"sklearn/mixture/tests/test_bayesian_mixture.py::test_log_dirichlet_norm\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_log_wishart_norm\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_covariance_type\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_weight_concentration_prior_type\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_weights_prior_initialisation\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_mean_prior_initialisation\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_precisions_prior_initialisation\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_check_is_fitted\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_weights\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_monotonic_likelihood\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_compare_covar_type\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_check_covariance_precision\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_invariant_translation\", 
\"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_fit_predict[0-2-1e-07]\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_fit_predict[1-2-0.1]\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_fit_predict[3-300-1e-07]\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_fit_predict[4-300-0.1]\", \"sklearn/mixture/tests/test_bayesian_mixture.py::test_bayesian_mixture_predict_predict_proba\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_attributes\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_check_X\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_check_weights\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_check_means\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_check_precisions\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_suffstat_sk_full\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_suffstat_sk_tied\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_suffstat_sk_diag\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_suffstat_sk_spherical\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_compute_log_det_cholesky\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_log_probabilities\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_estimate_log_prob_resp\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_predict_predict_proba\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_predict[0-2-1e-07]\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_predict[1-2-0.1]\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_predict[3-300-1e-07]\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_predict[4-300-0.1]\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_best_params\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_fit_convergence_warning\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_multiple_init\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_n_parameters\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_bic_1d_1component\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_aic_bic\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_gaussian_mixture_verbose\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_warm_start[0]\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_warm_start[1]\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_warm_start[2]\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_convergence_detected_with_warm_start\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_score\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_score_samples\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_monotonic_likelihood\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_regularisation\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_property\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_sample\", \"sklearn/mixture/tests/test_gaussian_mixture.py::test_init\"]", + "expected_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "test_file_spans": { + 
"sklearn/mixture/tests/test_bayesian_mixture.py": [ + "test_bayesian_mixture_predict_predict_proba" + ], + "sklearn/mixture/tests/test_gaussian_mixture.py": [ + "test_gaussian_mixture_fit" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict", + "BaseMixture.predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict", + "BaseMixture.predict" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sklearn/mixture/base.py": [ + "docstring", + "imports", + "_check_shape", + "_check_X", + "BaseMixture.__init__", + "BaseMixture._check_initial_parameters", + "BaseMixture._initialize_parameters", + "BaseMixture.fit_predict", + "BaseMixture.score_samples", + "BaseMixture.sample", + "BaseMixture._estimate_log_prob_resp", + "BaseMixture._print_verbose_msg_init_beg", + "BaseMixture._print_verbose_msg_iter_end", + "BaseMixture._print_verbose_msg_init_end" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "docstring", + "imports", + "_check_shape", + "_check_X", + "BaseMixture.__init__", + "BaseMixture._check_initial_parameters", + "BaseMixture._initialize_parameters", + "BaseMixture.fit_predict", + "BaseMixture.score_samples", + "BaseMixture.sample", + "BaseMixture._estimate_log_prob_resp", + "BaseMixture._print_verbose_msg_init_beg", + "BaseMixture._print_verbose_msg_iter_end", + "BaseMixture._print_verbose_msg_init_end" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ], + "sklearn/mixture/tests/test_gaussian_mixture.py": [ + "imports", + "test_gaussian_mixture_fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + 
"BaseMixture.fit_predict" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit", + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/mixture/base.py": [ + "BaseMixture.fit_predict" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11001", + "repo": "django/django", + "base_commit": "ef082ebb84f00e38af4e8880d04e8365c2766d34", + "problem_statement": "Incorrect removal of order_by clause created as multiline RawSQL\nDescription\n\t\nHi.\nThe SQLCompiler is ripping off one of my \"order by\" clause, because he \"thinks\" the clause was already \"seen\" (in SQLCompiler.get_order_by()). I'm using expressions written as multiline RawSQLs, which are similar but not the same. \nThe bug is located in SQLCompiler.get_order_by(), somewhere around line computing part of SQL query without ordering:\nwithout_ordering = self.ordering_parts.search(sql).group(1)\nThe sql variable contains multiline sql. As a result, the self.ordering_parts regular expression is returning just a line containing ASC or DESC words. This line is added to seen set, and because my raw queries have identical last lines, only the first clasue is returing from SQLCompiler.get_order_by().\nAs a quick/temporal fix I can suggest making sql variable clean of newline characters, like this:\nsql_oneline = ' '.join(sql.split('\\n'))\nwithout_ordering = self.ordering_parts.search(sql_oneline).group(1)\nNote: beware of unicode (Py2.x u'') and EOL dragons (\\r).\nExample of my query:\n\treturn MyModel.objects.all().order_by(\n\t\tRawSQL('''\n\t\t\tcase when status in ('accepted', 'verification')\n\t\t\t\t then 2 else 1 end''', []).desc(),\n\t\tRawSQL('''\n\t\t\tcase when status in ('accepted', 'verification')\n\t\t\t\t then (accepted_datetime, preferred_datetime)\n\t\t\t\t else null end''', []).asc(),\n\t\tRawSQL('''\n\t\t\tcase when status not in ('accepted', 'verification')\n\t\t\t\t then (accepted_datetime, preferred_datetime, created_at)\n\t\t\t\t else null end''', []).desc())\nThe ordering_parts.search is returing accordingly:\n'\t\t\t\t then 2 else 1 end)'\n'\t\t\t\t else null end'\n'\t\t\t\t else null end'\nSecond RawSQL with a\t\t\t\t else null end part is removed from query.\nThe fun thing is that the issue can be solved by workaround by adding a space or any other char to the last line. \nSo in case of RawSQL I can just say, that current implementation of avoiding duplicates in order by clause works only for special/rare cases (or does not work in all cases). 
\nThe bug filed here is about wrong identification of duplicates (because it compares only last line of SQL passed to order by clause).\nHope my notes will help you fixing the issue. Sorry for my english.\n", + "golden_patch": "diff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py\n--- a/django/db/models/sql/compiler.py\n+++ b/django/db/models/sql/compiler.py\n@@ -32,7 +32,8 @@ def __init__(self, query, connection, using):\n self.select = None\n self.annotation_col_map = None\n self.klass_info = None\n- self.ordering_parts = re.compile(r'(.*)\\s(ASC|DESC)(.*)')\n+ # Multiline ordering SQL clause may appear from RawSQL.\n+ self.ordering_parts = re.compile(r'^(.*)\\s(ASC|DESC)(.*)', re.MULTILINE | re.DOTALL)\n self._meta_ordering = None\n \n def setup_query(self):\n", + "test_patch": "diff --git a/tests/expressions/tests.py b/tests/expressions/tests.py\n--- a/tests/expressions/tests.py\n+++ b/tests/expressions/tests.py\n@@ -384,6 +384,29 @@ def test_order_by_exists(self):\n )\n self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary])\n \n+ def test_order_by_multiline_sql(self):\n+ raw_order_by = (\n+ RawSQL('''\n+ CASE WHEN num_employees > 1000\n+ THEN num_chairs\n+ ELSE 0 END\n+ ''', []).desc(),\n+ RawSQL('''\n+ CASE WHEN num_chairs > 1\n+ THEN 1\n+ ELSE 0 END\n+ ''', []).asc()\n+ )\n+ for qs in (\n+ Company.objects.all(),\n+ Company.objects.distinct(),\n+ ):\n+ with self.subTest(qs=qs):\n+ self.assertSequenceEqual(\n+ qs.order_by(*raw_order_by),\n+ [self.example_inc, self.gmbh, self.foobar_ltd],\n+ )\n+\n def test_outerref(self):\n inner = Company.objects.filter(point_of_contact=OuterRef('pk'))\n msg = (\n", + "fail_to_pass": "[\"test_order_by_multiline_sql (expressions.tests.BasicExpressionsTests)\", \"test_order_of_operations (expressions.tests.BasicExpressionsTests)\"]", + "pass_to_pass": "[\"test_deconstruct (expressions.tests.FTests)\", \"test_deepcopy (expressions.tests.FTests)\", \"test_equal (expressions.tests.FTests)\", \"test_hash (expressions.tests.FTests)\", \"test_not_equal_Value (expressions.tests.FTests)\", \"test_and (expressions.tests.CombinableTests)\", \"test_negation (expressions.tests.CombinableTests)\", \"test_or (expressions.tests.CombinableTests)\", \"test_reversed_and (expressions.tests.CombinableTests)\", \"test_reversed_or (expressions.tests.CombinableTests)\", \"test_aggregates (expressions.tests.ReprTests)\", \"test_distinct_aggregates (expressions.tests.ReprTests)\", \"test_expressions (expressions.tests.ReprTests)\", \"test_filtered_aggregates (expressions.tests.ReprTests)\", \"test_functions (expressions.tests.ReprTests)\", \"test_equal (expressions.tests.SimpleExpressionTests)\", \"test_hash (expressions.tests.SimpleExpressionTests)\", \"test_month_aggregation (expressions.tests.FieldTransformTests)\", \"test_multiple_transforms_in_values (expressions.tests.FieldTransformTests)\", \"test_transform_in_values (expressions.tests.FieldTransformTests)\", \"test_deconstruct (expressions.tests.ValueTests)\", \"test_deconstruct_output_field (expressions.tests.ValueTests)\", \"test_equal (expressions.tests.ValueTests)\", \"test_equal_output_field (expressions.tests.ValueTests)\", \"test_hash (expressions.tests.ValueTests)\", \"test_raise_empty_expressionlist (expressions.tests.ValueTests)\", \"test_update_TimeField_using_Value (expressions.tests.ValueTests)\", \"test_update_UUIDField_using_Value (expressions.tests.ValueTests)\", \"test_complex_expressions (expressions.tests.ExpressionsNumericTests)\", 
\"test_fill_with_value_from_same_object (expressions.tests.ExpressionsNumericTests)\", \"test_filter_not_equals_other_field (expressions.tests.ExpressionsNumericTests)\", \"test_increment_value (expressions.tests.ExpressionsNumericTests)\", \"test_F_reuse (expressions.tests.ExpressionsTests)\", \"test_insensitive_patterns_escape (expressions.tests.ExpressionsTests)\", \"test_patterns_escape (expressions.tests.ExpressionsTests)\", \"test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_expressions_in_lookups_join_choice (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_in_lookup_allows_F_expressions_and_expressions_for_datetimes (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_in_lookup_allows_F_expressions_and_expressions_for_integers (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_range_lookup_allows_F_expressions_and_expressions_for_integers (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_lefthand_addition (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_and (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_left_shift_operator (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_or (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_right_shift_operator (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_division (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_modulo (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_multiplication (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_power (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_subtraction (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_addition (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_division (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_modulo (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_multiplication (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_subtraction (expressions.tests.ExpressionOperatorTests)\", \"test_righthand_power (expressions.tests.ExpressionOperatorTests)\", \"test_aggregate_subquery_annotation (expressions.tests.BasicExpressionsTests)\", \"test_annotate_values_aggregate (expressions.tests.BasicExpressionsTests)\", \"test_annotate_values_count (expressions.tests.BasicExpressionsTests)\", \"test_annotate_values_filter (expressions.tests.BasicExpressionsTests)\", \"test_annotation_with_outerref (expressions.tests.BasicExpressionsTests)\", \"test_annotations_within_subquery (expressions.tests.BasicExpressionsTests)\", \"test_arithmetic (expressions.tests.BasicExpressionsTests)\", \"test_exist_single_field_output_field (expressions.tests.BasicExpressionsTests)\", \"test_explicit_output_field (expressions.tests.BasicExpressionsTests)\", \"test_filter_inter_attribute (expressions.tests.BasicExpressionsTests)\", \"test_filter_with_join (expressions.tests.BasicExpressionsTests)\", \"test_filtering_on_annotate_that_uses_q (expressions.tests.BasicExpressionsTests)\", \"test_in_subquery (expressions.tests.BasicExpressionsTests)\", \"test_incorrect_field_in_F_expression (expressions.tests.BasicExpressionsTests)\", \"test_incorrect_joined_field_in_F_expression (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery_outer_ref_2 
(expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery_outer_ref_with_autofield (expressions.tests.BasicExpressionsTests)\", \"test_new_object_create (expressions.tests.BasicExpressionsTests)\", \"test_new_object_save (expressions.tests.BasicExpressionsTests)\", \"test_object_create_with_aggregate (expressions.tests.BasicExpressionsTests)\", \"test_object_update (expressions.tests.BasicExpressionsTests)\", \"test_object_update_fk (expressions.tests.BasicExpressionsTests)\", \"test_object_update_unsaved_objects (expressions.tests.BasicExpressionsTests)\", \"test_order_by_exists (expressions.tests.BasicExpressionsTests)\", \"test_outerref (expressions.tests.BasicExpressionsTests)\", \"test_outerref_mixed_case_table_name (expressions.tests.BasicExpressionsTests)\", \"test_outerref_with_operator (expressions.tests.BasicExpressionsTests)\", \"test_parenthesis_priority (expressions.tests.BasicExpressionsTests)\", \"test_pickle_expression (expressions.tests.BasicExpressionsTests)\", \"test_subquery (expressions.tests.BasicExpressionsTests)\", \"test_subquery_filter_by_aggregate (expressions.tests.BasicExpressionsTests)\", \"test_subquery_references_joined_table_twice (expressions.tests.BasicExpressionsTests)\", \"test_ticket_11722_iexact_lookup (expressions.tests.BasicExpressionsTests)\", \"test_ticket_16731_startswith_lookup (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_chained_filters (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_join_reuse (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_kwarg_ordering (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_kwarg_ordering_2 (expressions.tests.BasicExpressionsTests)\", \"test_update (expressions.tests.BasicExpressionsTests)\", \"test_update_inherited_field_value (expressions.tests.BasicExpressionsTests)\", \"test_update_with_fk (expressions.tests.BasicExpressionsTests)\", \"test_update_with_none (expressions.tests.BasicExpressionsTests)\", \"test_uuid_pk_subquery (expressions.tests.BasicExpressionsTests)\", \"test_date_comparison (expressions.tests.FTimeDeltaTests)\", \"test_date_minus_duration (expressions.tests.FTimeDeltaTests)\", \"test_date_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_datetime_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_datetime_subtraction_microseconds (expressions.tests.FTimeDeltaTests)\", \"test_delta_add (expressions.tests.FTimeDeltaTests)\", \"test_delta_subtract (expressions.tests.FTimeDeltaTests)\", \"test_delta_update (expressions.tests.FTimeDeltaTests)\", \"test_duration_with_datetime (expressions.tests.FTimeDeltaTests)\", \"test_duration_with_datetime_microseconds (expressions.tests.FTimeDeltaTests)\", \"test_durationfield_add (expressions.tests.FTimeDeltaTests)\", \"test_exclude (expressions.tests.FTimeDeltaTests)\", \"test_invalid_operator (expressions.tests.FTimeDeltaTests)\", \"test_mixed_comparisons2 (expressions.tests.FTimeDeltaTests)\", \"test_multiple_query_compilation (expressions.tests.FTimeDeltaTests)\", \"test_negative_timedelta_update (expressions.tests.FTimeDeltaTests)\", \"test_query_clone (expressions.tests.FTimeDeltaTests)\", \"test_time_subtraction (expressions.tests.FTimeDeltaTests)\"]", + "expected_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.__init__" + ] + }, + "test_file_spans": { + "tests/expressions/tests.py": [ + "BasicExpressionsTests.test_outerref" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + 
"django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.__init__", + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.__init__", + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.__init__", + "SQLCompiler.get_order_by" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.__init__", + "SQLCompiler.get_order_by" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.__init__", + "SQLCompiler.get_order_by" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.__init__", + "SQLCompiler.get_order_by" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.__init__", + "SQLCompiler.get_order_by" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.__init__", + "SQLCompiler.get_order_by" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + 
] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/models/expressions.py": [ + "RawSQL.__init__" + ], + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by" + ], + "django/db/models/sql/query.py": [ + "Query.__init__" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240524_opencsg_starship_gpt4", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by" + ] + } + }, + { + "run_name": "20240623_moatless_claude35sonnet", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "run_name": "20240604_CodeR", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "run_name": "20241016_IBM-SWE-1.0", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "run_name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.get_order_by", + "SQLCompiler.get_extra_select" + ] + } + } + ] + }, + { + "instance_id": "django__django-11019", + "repo": "django/django", + "base_commit": "93e892bb645b16ebaf287beb5fe7f3ffe8d10408", + "problem_statement": "Merging 3 or more media objects can throw unnecessary MediaOrderConflictWarnings\nDescription\n\t\nConsider the following form definition, where text-editor-extras.js depends on text-editor.js but all other JS files are independent:\nfrom django import forms\nclass ColorPicker(forms.Widget):\n\tclass Media:\n\t\tjs = ['color-picker.js']\nclass SimpleTextWidget(forms.Widget):\n\tclass Media:\n\t\tjs = ['text-editor.js']\nclass FancyTextWidget(forms.Widget):\n\tclass Media:\n\t\tjs = ['text-editor.js', 'text-editor-extras.js', 'color-picker.js']\nclass MyForm(forms.Form):\n\tbackground_color = forms.CharField(widget=ColorPicker())\n\tintro = forms.CharField(widget=SimpleTextWidget())\n\tbody = forms.CharField(widget=FancyTextWidget())\nDjango should be able to resolve the JS files 
for the final form into the order text-editor.js, text-editor-extras.js, color-picker.js. However, accessing MyForm().media results in:\n/projects/django/django/forms/widgets.py:145: MediaOrderConflictWarning: Detected duplicate Media files in an opposite order:\ntext-editor-extras.js\ntext-editor.js\n MediaOrderConflictWarning,\nMedia(css={}, js=['text-editor-extras.js', 'color-picker.js', 'text-editor.js'])\nThe MediaOrderConflictWarning is a result of the order that the additions happen in: ColorPicker().media + SimpleTextWidget().media produces Media(css={}, js=['color-picker.js', 'text-editor.js']), which (wrongly) imposes the constraint that color-picker.js must appear before text-editor.js.\nThe final result is particularly unintuitive here, as it's worse than the \"na\u00efve\" result produced by Django 1.11 before order-checking was added (color-picker.js, text-editor.js, text-editor-extras.js), and the pair of files reported in the warning message seems wrong too (aren't color-picker.js and text-editor.js the wrong-ordered ones?)\n", + "golden_patch": "diff --git a/django/forms/widgets.py b/django/forms/widgets.py\n--- a/django/forms/widgets.py\n+++ b/django/forms/widgets.py\n@@ -6,16 +6,21 @@\n import datetime\n import re\n import warnings\n+from collections import defaultdict\n from itertools import chain\n \n from django.conf import settings\n from django.forms.utils import to_current_timezone\n from django.templatetags.static import static\n from django.utils import datetime_safe, formats\n+from django.utils.datastructures import OrderedSet\n from django.utils.dates import MONTHS\n from django.utils.formats import get_format\n from django.utils.html import format_html, html_safe\n from django.utils.safestring import mark_safe\n+from django.utils.topological_sort import (\n+ CyclicDependencyError, stable_topological_sort,\n+)\n from django.utils.translation import gettext_lazy as _\n \n from .renderers import get_default_renderer\n@@ -59,22 +64,15 @@ def __str__(self):\n \n @property\n def _css(self):\n- css = self._css_lists[0]\n- # filter(None, ...) avoids calling merge with empty dicts.\n- for obj in filter(None, self._css_lists[1:]):\n- css = {\n- medium: self.merge(css.get(medium, []), obj.get(medium, []))\n- for medium in css.keys() | obj.keys()\n- }\n- return css\n+ css = defaultdict(list)\n+ for css_list in self._css_lists:\n+ for medium, sublist in css_list.items():\n+ css[medium].append(sublist)\n+ return {medium: self.merge(*lists) for medium, lists in css.items()}\n \n @property\n def _js(self):\n- js = self._js_lists[0]\n- # filter(None, ...) avoids calling merge() with empty lists.\n- for obj in filter(None, self._js_lists[1:]):\n- js = self.merge(js, obj)\n- return js\n+ return self.merge(*self._js_lists)\n \n def render(self):\n return mark_safe('\\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES)))\n@@ -115,39 +113,37 @@ def __getitem__(self, name):\n raise KeyError('Unknown media type \"%s\"' % name)\n \n @staticmethod\n- def merge(list_1, list_2):\n+ def merge(*lists):\n \"\"\"\n- Merge two lists while trying to keep the relative order of the elements.\n- Warn if the lists have the same two elements in a different relative\n- order.\n+ Merge lists while trying to keep the relative order of the elements.\n+ Warn if the lists have the same elements in a different relative order.\n \n For static assets it can be important to have them included in the DOM\n in a certain order. 
In JavaScript you may not be able to reference a\n global or in CSS you might want to override a style.\n \"\"\"\n- # Start with a copy of list_1.\n- combined_list = list(list_1)\n- last_insert_index = len(list_1)\n- # Walk list_2 in reverse, inserting each element into combined_list if\n- # it doesn't already exist.\n- for path in reversed(list_2):\n- try:\n- # Does path already exist in the list?\n- index = combined_list.index(path)\n- except ValueError:\n- # Add path to combined_list since it doesn't exist.\n- combined_list.insert(last_insert_index, path)\n- else:\n- if index > last_insert_index:\n- warnings.warn(\n- 'Detected duplicate Media files in an opposite order:\\n'\n- '%s\\n%s' % (combined_list[last_insert_index], combined_list[index]),\n- MediaOrderConflictWarning,\n- )\n- # path already exists in the list. Update last_insert_index so\n- # that the following elements are inserted in front of this one.\n- last_insert_index = index\n- return combined_list\n+ dependency_graph = defaultdict(set)\n+ all_items = OrderedSet()\n+ for list_ in filter(None, lists):\n+ head = list_[0]\n+ # The first items depend on nothing but have to be part of the\n+ # dependency graph to be included in the result.\n+ dependency_graph.setdefault(head, set())\n+ for item in list_:\n+ all_items.add(item)\n+ # No self dependencies\n+ if head != item:\n+ dependency_graph[item].add(head)\n+ head = item\n+ try:\n+ return stable_topological_sort(all_items, dependency_graph)\n+ except CyclicDependencyError:\n+ warnings.warn(\n+ 'Detected duplicate Media files in an opposite order: {}'.format(\n+ ', '.join(repr(l) for l in lists)\n+ ), MediaOrderConflictWarning,\n+ )\n+ return list(all_items)\n \n def __add__(self, other):\n combined = Media()\n", + "test_patch": "diff --git a/tests/admin_inlines/tests.py b/tests/admin_inlines/tests.py\n--- a/tests/admin_inlines/tests.py\n+++ b/tests/admin_inlines/tests.py\n@@ -497,10 +497,10 @@ def test_inline_media_only_inline(self):\n response.context['inline_admin_formsets'][0].media._js,\n [\n 'admin/js/vendor/jquery/jquery.min.js',\n- 'admin/js/jquery.init.js',\n- 'admin/js/inlines.min.js',\n 'my_awesome_inline_scripts.js',\n 'custom_number.js',\n+ 'admin/js/jquery.init.js',\n+ 'admin/js/inlines.min.js',\n ]\n )\n self.assertContains(response, 'my_awesome_inline_scripts.js')\ndiff --git a/tests/admin_widgets/test_autocomplete_widget.py b/tests/admin_widgets/test_autocomplete_widget.py\n--- a/tests/admin_widgets/test_autocomplete_widget.py\n+++ b/tests/admin_widgets/test_autocomplete_widget.py\n@@ -139,4 +139,4 @@ def test_media(self):\n else:\n expected_files = base_files\n with translation.override(lang):\n- self.assertEqual(AutocompleteSelect(rel, admin.site).media._js, expected_files)\n+ self.assertEqual(AutocompleteSelect(rel, admin.site).media._js, list(expected_files))\ndiff --git a/tests/forms_tests/tests/test_media.py b/tests/forms_tests/tests/test_media.py\n--- a/tests/forms_tests/tests/test_media.py\n+++ b/tests/forms_tests/tests/test_media.py\n@@ -25,8 +25,8 @@ def test_construction(self):\n )\n self.assertEqual(\n repr(m),\n- \"Media(css={'all': ('path/to/css1', '/path/to/css2')}, \"\n- \"js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'))\"\n+ \"Media(css={'all': ['path/to/css1', '/path/to/css2']}, \"\n+ \"js=['/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'])\"\n )\n \n class Foo:\n@@ -125,8 +125,8 @@ class Media:\n \n \n \n-\n-\"\"\"\n+\n+\"\"\"\n )\n \n # media 
addition hasn't affected the original objects\n@@ -151,6 +151,17 @@ class Media:\n self.assertEqual(str(w4.media), \"\"\"\n \"\"\")\n \n+ def test_media_deduplication(self):\n+ # A deduplication test applied directly to a Media object, to confirm\n+ # that the deduplication doesn't only happen at the point of merging\n+ # two or more media objects.\n+ media = Media(\n+ css={'all': ('/path/to/css1', '/path/to/css1')},\n+ js=('/path/to/js1', '/path/to/js1'),\n+ )\n+ self.assertEqual(str(media), \"\"\"\n+\"\"\")\n+\n def test_media_property(self):\n ###############################################################\n # Property-based media definitions\n@@ -197,12 +208,12 @@ def _media(self):\n self.assertEqual(\n str(w6.media),\n \"\"\"\n-\n \n+\n \n+\n \n-\n-\"\"\"\n+\"\"\"\n )\n \n def test_media_inheritance(self):\n@@ -247,8 +258,8 @@ class Media:\n \n \n \n-\n-\"\"\"\n+\n+\"\"\"\n )\n \n def test_media_inheritance_from_property(self):\n@@ -322,8 +333,8 @@ class Media:\n \n \n \n-\n-\"\"\"\n+\n+\"\"\"\n )\n \n def test_media_inheritance_single_type(self):\n@@ -420,8 +431,8 @@ def __init__(self, attrs=None):\n \n \n \n-\n-\"\"\"\n+\n+\"\"\"\n )\n \n def test_form_media(self):\n@@ -462,8 +473,8 @@ class MyForm(Form):\n \n \n \n-\n-\"\"\"\n+\n+\"\"\"\n )\n \n # Form media can be combined to produce a single media definition.\n@@ -477,8 +488,8 @@ class AnotherForm(Form):\n \n \n \n-\n-\"\"\"\n+\n+\"\"\"\n )\n \n # Forms can also define media, following the same rules as widgets.\n@@ -495,28 +506,28 @@ class Media:\n self.assertEqual(\n str(f3.media),\n \"\"\"\n+\n \n \n-\n \n+\n \n-\n \n-\"\"\"\n+\"\"\"\n )\n \n # Media works in templates\n self.assertEqual(\n Template(\"{{ form.media.js }}{{ form.media.css }}\").render(Context({'form': f3})),\n \"\"\"\n+\n \n-\n \n-\"\"\"\n+\"\"\"\n \"\"\"\n+\n \n-\n-\"\"\"\n+\"\"\"\n )\n \n def test_html_safe(self):\n@@ -526,19 +537,23 @@ def test_html_safe(self):\n \n def test_merge(self):\n test_values = (\n- (([1, 2], [3, 4]), [1, 2, 3, 4]),\n+ (([1, 2], [3, 4]), [1, 3, 2, 4]),\n (([1, 2], [2, 3]), [1, 2, 3]),\n (([2, 3], [1, 2]), [1, 2, 3]),\n (([1, 3], [2, 3]), [1, 2, 3]),\n (([1, 2], [1, 3]), [1, 2, 3]),\n (([1, 2], [3, 2]), [1, 3, 2]),\n+ (([1, 2], [1, 2]), [1, 2]),\n+ ([[1, 2], [1, 3], [2, 3], [5, 7], [5, 6], [6, 7, 9], [8, 9]], [1, 5, 8, 2, 6, 3, 7, 9]),\n+ ((), []),\n+ (([1, 2],), [1, 2]),\n )\n- for (list1, list2), expected in test_values:\n- with self.subTest(list1=list1, list2=list2):\n- self.assertEqual(Media.merge(list1, list2), expected)\n+ for lists, expected in test_values:\n+ with self.subTest(lists=lists):\n+ self.assertEqual(Media.merge(*lists), expected)\n \n def test_merge_warning(self):\n- msg = 'Detected duplicate Media files in an opposite order:\\n1\\n2'\n+ msg = 'Detected duplicate Media files in an opposite order: [1, 2], [2, 1]'\n with self.assertWarnsMessage(RuntimeWarning, msg):\n self.assertEqual(Media.merge([1, 2], [2, 1]), [1, 2])\n \n@@ -546,28 +561,30 @@ def test_merge_js_three_way(self):\n \"\"\"\n The relative order of scripts is preserved in a three-way merge.\n \"\"\"\n- # custom_widget.js doesn't depend on jquery.js.\n- widget1 = Media(js=['custom_widget.js'])\n- widget2 = Media(js=['jquery.js', 'uses_jquery.js'])\n- form_media = widget1 + widget2\n- # The relative ordering of custom_widget.js and jquery.js has been\n- # established (but without a real need to).\n- self.assertEqual(form_media._js, ['custom_widget.js', 'jquery.js', 'uses_jquery.js'])\n- # The inline also uses custom_widget.js. 
This time, it's at the end.\n- inline_media = Media(js=['jquery.js', 'also_jquery.js']) + Media(js=['custom_widget.js'])\n- merged = form_media + inline_media\n- self.assertEqual(merged._js, ['custom_widget.js', 'jquery.js', 'uses_jquery.js', 'also_jquery.js'])\n+ widget1 = Media(js=['color-picker.js'])\n+ widget2 = Media(js=['text-editor.js'])\n+ widget3 = Media(js=['text-editor.js', 'text-editor-extras.js', 'color-picker.js'])\n+ merged = widget1 + widget2 + widget3\n+ self.assertEqual(merged._js, ['text-editor.js', 'text-editor-extras.js', 'color-picker.js'])\n+\n+ def test_merge_js_three_way2(self):\n+ # The merge prefers to place 'c' before 'b' and 'g' before 'h' to\n+ # preserve the original order. The preference 'c'->'b' is overridden by\n+ # widget3's media, but 'g'->'h' survives in the final ordering.\n+ widget1 = Media(js=['a', 'c', 'f', 'g', 'k'])\n+ widget2 = Media(js=['a', 'b', 'f', 'h', 'k'])\n+ widget3 = Media(js=['b', 'c', 'f', 'k'])\n+ merged = widget1 + widget2 + widget3\n+ self.assertEqual(merged._js, ['a', 'b', 'c', 'f', 'g', 'h', 'k'])\n \n def test_merge_css_three_way(self):\n- widget1 = Media(css={'screen': ['a.css']})\n- widget2 = Media(css={'screen': ['b.css']})\n- widget3 = Media(css={'all': ['c.css']})\n- form1 = widget1 + widget2\n- form2 = widget2 + widget1\n- # form1 and form2 have a.css and b.css in different order...\n- self.assertEqual(form1._css, {'screen': ['a.css', 'b.css']})\n- self.assertEqual(form2._css, {'screen': ['b.css', 'a.css']})\n- # ...but merging succeeds as the relative ordering of a.css and b.css\n- # was never specified.\n- merged = widget3 + form1 + form2\n- self.assertEqual(merged._css, {'screen': ['a.css', 'b.css'], 'all': ['c.css']})\n+ widget1 = Media(css={'screen': ['c.css'], 'all': ['d.css', 'e.css']})\n+ widget2 = Media(css={'screen': ['a.css']})\n+ widget3 = Media(css={'screen': ['a.css', 'b.css', 'c.css'], 'all': ['e.css']})\n+ merged = widget1 + widget2\n+ # c.css comes before a.css because widget1 + widget2 establishes this\n+ # order.\n+ self.assertEqual(merged._css, {'screen': ['c.css', 'a.css'], 'all': ['d.css', 'e.css']})\n+ merged = merged + widget3\n+ # widget3 contains an explicit ordering of c.css and a.css.\n+ self.assertEqual(merged._css, {'screen': ['a.css', 'b.css', 'c.css'], 'all': ['d.css', 'e.css']})\n", + "fail_to_pass": "[\"test_combine_media (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_construction (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_form_media (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_media_deduplication (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_media_inheritance (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_media_inheritance_extends (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_media_property_parent_references (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_merge (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_merge_css_three_way (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_merge_js_three_way (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_merge_js_three_way2 (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_merge_warning (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_multi_widget (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_media (admin_widgets.test_autocomplete_widget.AutocompleteMixinTests)\", \"test_render_options (admin_widgets.test_autocomplete_widget.AutocompleteMixinTests)\", \"test_inline_media_only_inline 
(admin_inlines.tests.TestInlineMedia)\"]", + "pass_to_pass": "[\"Regression for #9362\", \"test_html_safe (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_media_dsl (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_media_inheritance_from_property (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_media_inheritance_single_type (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_media_property (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_multi_media (forms_tests.tests.test_media.FormsMediaTestCase)\", \"test_build_attrs (admin_widgets.test_autocomplete_widget.AutocompleteMixinTests)\", \"test_build_attrs_no_custom_class (admin_widgets.test_autocomplete_widget.AutocompleteMixinTests)\", \"test_build_attrs_not_required_field (admin_widgets.test_autocomplete_widget.AutocompleteMixinTests)\", \"test_build_attrs_required_field (admin_widgets.test_autocomplete_widget.AutocompleteMixinTests)\", \"test_get_url (admin_widgets.test_autocomplete_widget.AutocompleteMixinTests)\", \"Empty option isn't present if the field isn't required.\", \"Empty option is present if the field isn't required.\", \"test_deleting_inline_with_protected_delete_does_not_validate (admin_inlines.tests.TestInlineProtectedOnDelete)\", \"test_all_inline_media (admin_inlines.tests.TestInlineMedia)\", \"test_inline_media_only_base (admin_inlines.tests.TestInlineMedia)\", \"test_inline_add_fk_add_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_add_fk_noperm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_add_m2m_add_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_add_m2m_noperm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_add_change_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_add_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_all_perms (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_change_del_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_change_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_noperm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_m2m_add_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_m2m_change_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_m2m_noperm (admin_inlines.tests.TestInlinePermissions)\", \"Admin inline should invoke local callable when its name is listed in readonly_fields\", \"test_can_delete (admin_inlines.tests.TestInline)\", \"test_create_inlines_on_inherited_model (admin_inlines.tests.TestInline)\", \"test_custom_form_tabular_inline_label (admin_inlines.tests.TestInline)\", \"test_custom_form_tabular_inline_overridden_label (admin_inlines.tests.TestInline)\", \"test_custom_get_extra_form (admin_inlines.tests.TestInline)\", \"test_custom_min_num (admin_inlines.tests.TestInline)\", \"test_custom_pk_shortcut (admin_inlines.tests.TestInline)\", \"test_help_text (admin_inlines.tests.TestInline)\", \"test_inline_editable_pk (admin_inlines.tests.TestInline)\", \"#18263 -- Make sure hidden fields don't get a column in tabular inlines\", \"test_inline_nonauto_noneditable_inherited_pk (admin_inlines.tests.TestInline)\", \"test_inline_nonauto_noneditable_pk (admin_inlines.tests.TestInline)\", \"test_inline_primary (admin_inlines.tests.TestInline)\", \"Inlines `show_change_link` for registered models when enabled.\", \"Inlines `show_change_link` disabled for unregistered 
models.\", \"test_localize_pk_shortcut (admin_inlines.tests.TestInline)\", \"Autogenerated many-to-many inlines are displayed correctly (#13407)\", \"test_min_num (admin_inlines.tests.TestInline)\", \"Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable\", \"test_non_related_name_inline (admin_inlines.tests.TestInline)\", \"Inlines without change permission shows field inputs on add form.\", \"Bug #13174.\", \"test_stacked_inline_edit_form_contains_has_original_class (admin_inlines.tests.TestInline)\", \"test_tabular_inline_column_css_class (admin_inlines.tests.TestInline)\", \"Inlines `show_change_link` disabled by default.\", \"test_tabular_model_form_meta_readonly_field (admin_inlines.tests.TestInline)\", \"test_tabular_non_field_errors (admin_inlines.tests.TestInline)\"]", + "expected_spans": { + "django/forms/widgets.py": [ + "imports", + "Media._css", + "Media._js", + "Media.merge" + ] + }, + "test_file_spans": { + "tests/admin_inlines/tests.py": [ + "TestInlineMedia.test_inline_media_only_inline" + ], + "tests/admin_widgets/test_autocomplete_widget.py": [ + "AutocompleteMixinTests", + "AutocompleteMixinTests.test_media" + ], + "tests/forms_tests/tests/test_media.py": [ + "FormsMediaTestCase.test_construction", + "FormsMediaTestCase.test_combine_media", + "FormsMediaTestCase.test_media_property", + "FormsMediaTestCase.test_media_property_parent_references", + "FormsMediaTestCase.test_media_inheritance", + "FormsMediaTestCase.test_media_inheritance_extends", + "FormsMediaTestCase.test_multi_widget", + "FormsMediaTestCase.test_form_media", + "FormsMediaTestCase.test_merge", + "FormsMediaTestCase.test_merge_warning", + "FormsMediaTestCase.test_merge_js_three_way", + "FormsMediaTestCase.test_merge_css_three_way" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-13241", + "repo": "scikit-learn/scikit-learn", + "base_commit": "f8b108d0c6f2f82b2dc4e32a6793f9d9ac9cf2f4", + "problem_statement": "Differences among the results of KernelPCA with rbf kernel\nHi there,\r\nI met with a problem:\r\n\r\n#### Description\r\nWhen I run KernelPCA for dimension reduction for the same datasets, the results are different in signs.\r\n\r\n#### Steps/Code to Reproduce\r\nJust to reduce the dimension to 7 with rbf kernel:\r\npca = KernelPCA(n_components=7, kernel='rbf', copy_X=False, n_jobs=-1)\r\npca.fit_transform(X)\r\n\r\n#### Expected Results\r\nThe same result.\r\n\r\n#### Actual Results\r\nThe results are the same except for their signs:(\r\n[[-0.44457617 -0.18155886 -0.10873474 0.13548386 -0.1437174 -0.057469\t0.18124364]] \r\n\r\n[[ 0.44457617 0.18155886 0.10873474 -0.13548386 -0.1437174 -0.057469 -0.18124364]] \r\n\r\n[[-0.44457617 -0.18155886 0.10873474 0.13548386 0.1437174 0.057469 0.18124364]] \r\n\r\n#### Versions\r\n0.18.1\r\n\n", + "golden_patch": "diff --git a/sklearn/decomposition/kernel_pca.py b/sklearn/decomposition/kernel_pca.py\n--- a/sklearn/decomposition/kernel_pca.py\n+++ b/sklearn/decomposition/kernel_pca.py\n@@ -8,6 +8,7 @@\n from scipy.sparse.linalg import eigsh\n \n from ..utils import check_random_state\n+from ..utils.extmath import svd_flip\n from ..utils.validation import check_is_fitted, check_array\n from ..exceptions import NotFittedError\n from ..base import BaseEstimator, TransformerMixin, _UnstableOn32BitMixin\n@@ -210,6 +211,10 @@ def _fit_transform(self, K):\n maxiter=self.max_iter,\n v0=v0)\n \n+ # flip eigenvectors' sign to enforce deterministic output\n+ self.alphas_, _ = 
svd_flip(self.alphas_,\n+ np.empty_like(self.alphas_).T)\n+\n # sort eigenvectors in descending order\n indices = self.lambdas_.argsort()[::-1]\n self.lambdas_ = self.lambdas_[indices]\n", + "test_patch": "diff --git a/sklearn/decomposition/tests/test_kernel_pca.py b/sklearn/decomposition/tests/test_kernel_pca.py\n--- a/sklearn/decomposition/tests/test_kernel_pca.py\n+++ b/sklearn/decomposition/tests/test_kernel_pca.py\n@@ -4,7 +4,7 @@\n \n from sklearn.utils.testing import (assert_array_almost_equal, assert_less,\n assert_equal, assert_not_equal,\n- assert_raises)\n+ assert_raises, assert_allclose)\n \n from sklearn.decomposition import PCA, KernelPCA\n from sklearn.datasets import make_circles\n@@ -71,6 +71,21 @@ def test_kernel_pca_consistent_transform():\n assert_array_almost_equal(transformed1, transformed2)\n \n \n+def test_kernel_pca_deterministic_output():\n+ rng = np.random.RandomState(0)\n+ X = rng.rand(10, 10)\n+ eigen_solver = ('arpack', 'dense')\n+\n+ for solver in eigen_solver:\n+ transformed_X = np.zeros((20, 2))\n+ for i in range(20):\n+ kpca = KernelPCA(n_components=2, eigen_solver=solver,\n+ random_state=rng)\n+ transformed_X[i, :] = kpca.fit_transform(X)[0]\n+ assert_allclose(\n+ transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))\n+\n+\n def test_kernel_pca_sparse():\n rng = np.random.RandomState(0)\n X_fit = sp.csr_matrix(rng.random_sample((5, 4)))\ndiff --git a/sklearn/decomposition/tests/test_pca.py b/sklearn/decomposition/tests/test_pca.py\n--- a/sklearn/decomposition/tests/test_pca.py\n+++ b/sklearn/decomposition/tests/test_pca.py\n@@ -6,6 +6,7 @@\n \n from sklearn.utils.testing import assert_almost_equal\n from sklearn.utils.testing import assert_array_almost_equal\n+from sklearn.utils.testing import assert_allclose\n from sklearn.utils.testing import assert_equal\n from sklearn.utils.testing import assert_greater\n from sklearn.utils.testing import assert_raise_message\n@@ -703,6 +704,19 @@ def test_pca_dtype_preservation(svd_solver):\n check_pca_int_dtype_upcast_to_double(svd_solver)\n \n \n+def test_pca_deterministic_output():\n+ rng = np.random.RandomState(0)\n+ X = rng.rand(10, 10)\n+\n+ for solver in solver_list:\n+ transformed_X = np.zeros((20, 2))\n+ for i in range(20):\n+ pca = PCA(n_components=2, svd_solver=solver, random_state=rng)\n+ transformed_X[i, :] = pca.fit_transform(X)[0]\n+ assert_allclose(\n+ transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))\n+\n+\n def check_pca_float_dtype_preservation(svd_solver):\n # Ensure that PCA does not upscale the dtype when input is float32\n X_64 = np.random.RandomState(0).rand(1000, 4).astype(np.float64)\n", + "fail_to_pass": "[\"sklearn/decomposition/tests/test_kernel_pca.py::test_kernel_pca_deterministic_output\"]", + "pass_to_pass": "[\"sklearn/decomposition/tests/test_kernel_pca.py::test_kernel_pca\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_kernel_pca_invalid_parameters\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_kernel_pca_consistent_transform\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_kernel_pca_sparse\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_kernel_pca_linear_kernel\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_kernel_pca_n_components\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_remove_zero_eig\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_kernel_pca_precomputed\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_kernel_pca_invalid_kernel\", 
\"sklearn/decomposition/tests/test_kernel_pca.py::test_gridsearch_pipeline\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_gridsearch_pipeline_precomputed\", \"sklearn/decomposition/tests/test_kernel_pca.py::test_nested_circles\", \"sklearn/decomposition/tests/test_pca.py::test_pca\", \"sklearn/decomposition/tests/test_pca.py::test_pca_arpack_solver\", \"sklearn/decomposition/tests/test_pca.py::test_pca_randomized_solver\", \"sklearn/decomposition/tests/test_pca.py::test_no_empty_slice_warning\", \"sklearn/decomposition/tests/test_pca.py::test_whitening\", \"sklearn/decomposition/tests/test_pca.py::test_explained_variance\", \"sklearn/decomposition/tests/test_pca.py::test_singular_values\", \"sklearn/decomposition/tests/test_pca.py::test_pca_check_projection\", \"sklearn/decomposition/tests/test_pca.py::test_pca_inverse\", \"sklearn/decomposition/tests/test_pca.py::test_pca_validation[full]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_validation[arpack]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_validation[randomized]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_validation[auto]\", \"sklearn/decomposition/tests/test_pca.py::test_n_components_none[full]\", \"sklearn/decomposition/tests/test_pca.py::test_n_components_none[arpack]\", \"sklearn/decomposition/tests/test_pca.py::test_n_components_none[randomized]\", \"sklearn/decomposition/tests/test_pca.py::test_n_components_none[auto]\", \"sklearn/decomposition/tests/test_pca.py::test_randomized_pca_check_projection\", \"sklearn/decomposition/tests/test_pca.py::test_randomized_pca_check_list\", \"sklearn/decomposition/tests/test_pca.py::test_randomized_pca_inverse\", \"sklearn/decomposition/tests/test_pca.py::test_n_components_mle\", \"sklearn/decomposition/tests/test_pca.py::test_pca_dim\", \"sklearn/decomposition/tests/test_pca.py::test_infer_dim_1\", \"sklearn/decomposition/tests/test_pca.py::test_infer_dim_2\", \"sklearn/decomposition/tests/test_pca.py::test_infer_dim_3\", \"sklearn/decomposition/tests/test_pca.py::test_infer_dim_by_explained_variance\", \"sklearn/decomposition/tests/test_pca.py::test_pca_score\", \"sklearn/decomposition/tests/test_pca.py::test_pca_score2\", \"sklearn/decomposition/tests/test_pca.py::test_pca_score3\", \"sklearn/decomposition/tests/test_pca.py::test_pca_score_with_different_solvers\", \"sklearn/decomposition/tests/test_pca.py::test_pca_zero_noise_variance_edge_cases\", \"sklearn/decomposition/tests/test_pca.py::test_svd_solver_auto\", \"sklearn/decomposition/tests/test_pca.py::test_pca_sparse_input[full]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_sparse_input[arpack]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_sparse_input[randomized]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_sparse_input[auto]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_bad_solver\", \"sklearn/decomposition/tests/test_pca.py::test_pca_dtype_preservation[full]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_dtype_preservation[arpack]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_dtype_preservation[randomized]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_dtype_preservation[auto]\", \"sklearn/decomposition/tests/test_pca.py::test_pca_deterministic_output\"]", + "expected_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "imports", + "KernelPCA._fit_transform" + ] + }, + "test_file_spans": { + "sklearn/decomposition/tests/test_kernel_pca.py": [ + "imports", + "test_kernel_pca_sparse" + ], + 
"sklearn/decomposition/tests/test_pca.py": [ + "imports", + "check_pca_float_dtype_preservation" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "docstring", + "imports", + "KernelPCA.__init__", + "KernelPCA._pairwise", + "KernelPCA._get_kernel", + "KernelPCA._fit_transform", + "KernelPCA._fit_inverse_transform", + "KernelPCA.fit", + "KernelPCA.fit_transform", + "KernelPCA.transform", + "KernelPCA.inverse_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "docstring", + "imports", + "KernelPCA.__init__", + "KernelPCA._pairwise", + "KernelPCA._get_kernel", + "KernelPCA._fit_transform", + "KernelPCA._fit_inverse_transform", + "KernelPCA.fit", + "KernelPCA.fit_transform", + "KernelPCA.transform", + "KernelPCA.inverse_transform" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA.fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA.fit_transform" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" 
+ ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA", + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA", + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA._fit_inverse_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA._fit_inverse_transform" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ], + "sklearn/decomposition/tests/test_kernel_pca.py": [ + "test_kernel_pca", + "test_kernel_pca_sparse", + "test_kernel_pca_linear_kernel" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA", + "KernelPCA.fit_transform" + ], + "sklearn/decomposition/tests/test_kernel_pca.py": [ + "test_nested_circles" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA", + "KernelPCA.fit_transform" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA", + "KernelPCA._fit_transform" + ] + }, + "alternative_spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA", + "KernelPCA._fit_transform" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + } + }, + { + "run_name": "20240604_CodeR", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA.fit_transform" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": 
{ + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "run_name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA", + "KernelPCA._fit_transform", + "KernelPCA.fit_transform" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform", + "KernelPCA._fit_inverse_transform" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA._fit_transform" + ] + } + }, + { + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA", + "KernelPCA.fit_transform" + ] + } + }, + { + "run_name": "20240721_amazon-q-developer-agent-20240719-dev", + "spans": { + "sklearn/decomposition/kernel_pca.py": [ + "KernelPCA", + "KernelPCA._fit_transform" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-16106", + "repo": "sympy/sympy", + "base_commit": "0e987498b00167fdd4a08a41c852a97cb70ce8f2", + "problem_statement": "mathml printer for IndexedBase required\nWriting an `Indexed` object to MathML fails with a `TypeError` exception: `TypeError: 'Indexed' object is not iterable`:\r\n\r\n```\r\nIn [340]: sympy.__version__\r\nOut[340]: '1.0.1.dev'\r\n\r\nIn [341]: from sympy.abc import (a, b)\r\n\r\nIn [342]: sympy.printing.mathml(sympy.IndexedBase(a)[b])\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n in ()\r\n----> 1 sympy.printing.mathml(sympy.IndexedBase(a)[b])\r\n\r\n/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/sympy/printing/mathml.py in mathml(expr, **settings)\r\n 442 def mathml(expr, **settings):\r\n 443 \"\"\"Returns the MathML representation of expr\"\"\"\r\n--> 444 return MathMLPrinter(settings).doprint(expr)\r\n 445 \r\n 446 \r\n\r\n/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/sympy/printing/mathml.py in doprint(self, expr)\r\n 36 Prints the expression as MathML.\r\n 37 \"\"\"\r\n---> 38 mathML = Printer._print(self, expr)\r\n 39 unistr = mathML.toxml()\r\n 40 xmlbstr = unistr.encode('ascii', 'xmlcharrefreplace')\r\n\r\n/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/sympy/printing/printer.py in _print(self, expr, *args, **kwargs)\r\n 255 printmethod = '_print_' + cls.__name__\r\n 256 if hasattr(self, printmethod):\r\n--> 257 return getattr(self, printmethod)(expr, *args, **kwargs)\r\n 258 # Unknown object, fall back to the emptyPrinter.\r\n 259 return self.emptyPrinter(expr)\r\n\r\n/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/sympy/printing/mathml.py in _print_Basic(self, e)\r\n 356 def _print_Basic(self, e):\r\n 357 x = self.dom.createElement(self.mathml_tag(e))\r\n--> 358 for arg in e:\r\n 359 
x.appendChild(self._print(arg))\r\n 360 return x\r\n\r\nTypeError: 'Indexed' object is not iterable\r\n```\r\n\r\nIt also fails for more complex expressions where at least one element is Indexed.\n", + "golden_patch": "diff --git a/sympy/printing/mathml.py b/sympy/printing/mathml.py\n--- a/sympy/printing/mathml.py\n+++ b/sympy/printing/mathml.py\n@@ -1271,6 +1271,26 @@ def _print_Lambda(self, e):\n return x\n \n \n+ def _print_tuple(self, e):\n+ x = self.dom.createElement('mfenced')\n+ for i in e:\n+ x.appendChild(self._print(i))\n+ return x\n+\n+\n+ def _print_IndexedBase(self, e):\n+ return self._print(e.label)\n+\n+ def _print_Indexed(self, e):\n+ x = self.dom.createElement('msub')\n+ x.appendChild(self._print(e.base))\n+ if len(e.indices) == 1:\n+ x.appendChild(self._print(e.indices[0]))\n+ return x\n+ x.appendChild(self._print(e.indices))\n+ return x\n+\n+\n def mathml(expr, printer='content', **settings):\n \"\"\"Returns the MathML representation of expr. If printer is presentation then\n prints Presentation MathML else prints content MathML.\n", + "test_patch": "diff --git a/sympy/printing/tests/test_mathml.py b/sympy/printing/tests/test_mathml.py\n--- a/sympy/printing/tests/test_mathml.py\n+++ b/sympy/printing/tests/test_mathml.py\n@@ -1,7 +1,7 @@\n from sympy import diff, Integral, Limit, sin, Symbol, Integer, Rational, cos, \\\n tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh, E, I, oo, \\\n pi, GoldenRatio, EulerGamma, Sum, Eq, Ne, Ge, Lt, Float, Matrix, Basic, S, \\\n- MatrixSymbol, Function, Derivative, log, Lambda\n+ MatrixSymbol, Function, Derivative, log, Lambda, IndexedBase, symbols\n from sympy.core.containers import Tuple\n from sympy.functions.elementary.complexes import re, im, Abs, conjugate\n from sympy.functions.elementary.integers import floor, ceiling\n@@ -1139,3 +1139,17 @@ def test_print_random_symbol():\n R = RandomSymbol(Symbol('R'))\n assert mpp.doprint(R) == 'R'\n assert mp.doprint(R) == 'R'\n+\n+\n+def test_print_IndexedBase():\n+ a,b,c,d,e = symbols('a b c d e')\n+ assert mathml(IndexedBase(a)[b],printer='presentation') == 'ab'\n+ assert mathml(IndexedBase(a)[b,c,d],printer = 'presentation') == 'abcd'\n+ assert mathml(IndexedBase(a)[b]*IndexedBase(c)[d]*IndexedBase(e),printer = 'presentation') == 'abcde'\n+\n+\n+def test_print_Indexed():\n+ a,b,c = symbols('a b c')\n+ assert mathml(IndexedBase(a),printer = 'presentation') == 'a'\n+ assert mathml(IndexedBase(a/b),printer = 'presentation') == 'ab'\n+ assert mathml(IndexedBase((a,b)),printer = 'presentation') == 'ab'\n", + "fail_to_pass": "[\"test_print_IndexedBase\"]", + "pass_to_pass": "[\"test_mathml_printer\", \"test_content_printmethod\", \"test_content_mathml_core\", \"test_content_mathml_functions\", \"test_content_mathml_limits\", \"test_content_mathml_integrals\", \"test_content_mathml_matrices\", \"test_content_mathml_sums\", \"test_content_mathml_tuples\", \"test_content_mathml_add\", \"test_content_mathml_Rational\", \"test_content_mathml_constants\", \"test_content_mathml_trig\", \"test_content_mathml_relational\", \"test_content_symbol\", \"test_content_mathml_greek\", \"test_content_mathml_order\", \"test_content_settings\", \"test_presentation_printmethod\", \"test_presentation_mathml_core\", \"test_presentation_mathml_functions\", \"test_print_derivative\", \"test_presentation_mathml_limits\", \"test_presentation_mathml_integrals\", \"test_presentation_mathml_matrices\", \"test_presentation_mathml_sums\", \"test_presentation_mathml_add\", \"test_presentation_mathml_Rational\", 
\"test_presentation_mathml_constants\", \"test_presentation_mathml_trig\", \"test_presentation_mathml_relational\", \"test_presentation_symbol\", \"test_presentation_mathml_greek\", \"test_presentation_mathml_order\", \"test_print_tuples\", \"test_print_re_im\", \"test_presentation_settings\", \"test_toprettyxml_hooking\", \"test_print_domains\", \"test_print_expression_with_minus\", \"test_print_AssocOp\", \"test_print_basic\", \"test_ln_notation_print\", \"test_mul_symbol_print\", \"test_print_lerchphi\", \"test_print_polylog\", \"test_print_logic\", \"test_root_notation_print\", \"test_fold_frac_powers_print\", \"test_fold_short_frac_print\", \"test_print_factorials\", \"test_print_Lambda\", \"test_print_conjugate\", \"test_print_matrix_symbol\", \"test_print_random_symbol\"]", + "expected_spans": { + "sympy/printing/mathml.py": [ + "mathml" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_mathml.py": [ + "imports" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11039", + "repo": "django/django", + "base_commit": "d5276398046ce4a102776a1e67dcac2884d80dfe", + "problem_statement": "sqlmigrate wraps it's outpout in BEGIN/COMMIT even if the database doesn't support transactional DDL\nDescription\n\t \n\t\t(last modified by Simon Charette)\n\t \nThe migration executor only adds the outer BEGIN/COMMIT \u200bif the migration is atomic and \u200bthe schema editor can rollback DDL but the current sqlmigrate logic only takes migration.atomic into consideration.\nThe issue can be addressed by\nChanging sqlmigrate \u200bassignment of self.output_transaction to consider connection.features.can_rollback_ddl as well.\nAdding a test in tests/migrations/test_commands.py based on \u200ban existing test for non-atomic migrations that mocks connection.features.can_rollback_ddl to False instead of overdidding MIGRATION_MODULES to point to a non-atomic migration.\nI marked the ticket as easy picking because I included the above guidelines but feel free to uncheck it if you deem it inappropriate.\n", + "golden_patch": "diff --git a/django/core/management/commands/sqlmigrate.py b/django/core/management/commands/sqlmigrate.py\n--- a/django/core/management/commands/sqlmigrate.py\n+++ b/django/core/management/commands/sqlmigrate.py\n@@ -55,8 +55,9 @@ def handle(self, *args, **options):\n migration_name, app_label))\n targets = [(app_label, migration.name)]\n \n- # Show begin/end around output only for atomic migrations\n- self.output_transaction = migration.atomic\n+ # Show begin/end around output for atomic migrations, if the database\n+ # supports transactional DDL.\n+ self.output_transaction = migration.atomic and connection.features.can_rollback_ddl\n \n # Make a plan that represents just the requested migrations and show SQL\n # for it\n", + "test_patch": "diff --git a/tests/migrations/test_commands.py b/tests/migrations/test_commands.py\n--- a/tests/migrations/test_commands.py\n+++ b/tests/migrations/test_commands.py\n@@ -536,7 +536,13 @@ def test_sqlmigrate_forwards(self):\n index_op_desc_unique_together = output.find('-- alter unique_together')\n index_tx_end = output.find(connection.ops.end_transaction_sql().lower())\n \n- self.assertGreater(index_tx_start, -1, \"Transaction start not found\")\n+ if connection.features.can_rollback_ddl:\n+ self.assertGreater(index_tx_start, -1, \"Transaction start not found\")\n+ self.assertGreater(\n+ index_tx_end, index_op_desc_unique_together,\n+ \"Transaction end not found or found before operation 
description (unique_together)\"\n+ )\n+\n self.assertGreater(\n index_op_desc_author, index_tx_start,\n \"Operation description (author) not found or found before transaction start\"\n@@ -553,10 +559,6 @@ def test_sqlmigrate_forwards(self):\n index_op_desc_unique_together, index_op_desc_tribble,\n \"Operation description (unique_together) not found or found before operation description (tribble)\"\n )\n- self.assertGreater(\n- index_tx_end, index_op_desc_unique_together,\n- \"Transaction end not found or found before operation description (unique_together)\"\n- )\n \n @override_settings(MIGRATION_MODULES={\"migrations\": \"migrations.test_migrations\"})\n def test_sqlmigrate_backwards(self):\n@@ -577,7 +579,12 @@ def test_sqlmigrate_backwards(self):\n index_drop_table = output.rfind('drop table')\n index_tx_end = output.find(connection.ops.end_transaction_sql().lower())\n \n- self.assertGreater(index_tx_start, -1, \"Transaction start not found\")\n+ if connection.features.can_rollback_ddl:\n+ self.assertGreater(index_tx_start, -1, \"Transaction start not found\")\n+ self.assertGreater(\n+ index_tx_end, index_op_desc_unique_together,\n+ \"Transaction end not found or found before DROP TABLE\"\n+ )\n self.assertGreater(\n index_op_desc_unique_together, index_tx_start,\n \"Operation description (unique_together) not found or found before transaction start\"\n@@ -595,10 +602,6 @@ def test_sqlmigrate_backwards(self):\n index_drop_table, index_op_desc_author,\n \"DROP TABLE not found or found before operation description (author)\"\n )\n- self.assertGreater(\n- index_tx_end, index_op_desc_unique_together,\n- \"Transaction end not found or found before DROP TABLE\"\n- )\n \n # Cleanup by unmigrating everything\n call_command(\"migrate\", \"migrations\", \"zero\", verbosity=0)\n@@ -616,6 +619,22 @@ def test_sqlmigrate_for_non_atomic_migration(self):\n self.assertNotIn(connection.ops.start_transaction_sql().lower(), queries)\n self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries)\n \n+ @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'})\n+ def test_sqlmigrate_for_non_transactional_databases(self):\n+ \"\"\"\n+ Transaction wrappers aren't shown for databases that don't support\n+ transactional DDL.\n+ \"\"\"\n+ out = io.StringIO()\n+ with mock.patch.object(connection.features, 'can_rollback_ddl', False):\n+ call_command('sqlmigrate', 'migrations', '0001', stdout=out)\n+ output = out.getvalue().lower()\n+ queries = [q.strip() for q in output.splitlines()]\n+ start_transaction_sql = connection.ops.start_transaction_sql()\n+ if start_transaction_sql:\n+ self.assertNotIn(start_transaction_sql.lower(), queries)\n+ self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries)\n+\n @override_settings(\n INSTALLED_APPS=[\n \"migrations.migrations_test_apps.migrated_app\",\n", + "fail_to_pass": "[\"test_sqlmigrate_for_non_transactional_databases (migrations.test_commands.MigrateTests)\"]", + "pass_to_pass": "[\"test_makemigrations_app_name_specified_as_label (migrations.test_commands.AppLabelErrorTests)\", \"test_makemigrations_nonexistent_app_label (migrations.test_commands.AppLabelErrorTests)\", \"test_migrate_app_name_specified_as_label (migrations.test_commands.AppLabelErrorTests)\", \"test_migrate_nonexistent_app_label (migrations.test_commands.AppLabelErrorTests)\", \"test_showmigrations_app_name_specified_as_label (migrations.test_commands.AppLabelErrorTests)\", \"test_showmigrations_nonexistent_app_label 
(migrations.test_commands.AppLabelErrorTests)\", \"test_sqlmigrate_app_name_specified_as_label (migrations.test_commands.AppLabelErrorTests)\", \"test_sqlmigrate_nonexistent_app_label (migrations.test_commands.AppLabelErrorTests)\", \"test_squashmigrations_app_name_specified_as_label (migrations.test_commands.AppLabelErrorTests)\", \"test_squashmigrations_nonexistent_app_label (migrations.test_commands.AppLabelErrorTests)\", \"--squashed-name specifies the new migration's name.\", \"--squashed-name also works if a start migration is omitted.\", \"test_squashmigrations_initial_attribute (migrations.test_commands.SquashMigrationsTests)\", \"test_squashmigrations_invalid_start (migrations.test_commands.SquashMigrationsTests)\", \"test_squashmigrations_optimizes (migrations.test_commands.SquashMigrationsTests)\", \"test_squashmigrations_squashes (migrations.test_commands.SquashMigrationsTests)\", \"test_squashmigrations_valid_start (migrations.test_commands.SquashMigrationsTests)\", \"test_ticket_23799_squashmigrations_no_optimize (migrations.test_commands.SquashMigrationsTests)\", \"test_failing_migration (migrations.test_commands.MakeMigrationsTests)\", \"test_files_content (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigration_merge_dry_run (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigration_merge_dry_run_verbosity_3 (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_auto_now_add_interactive (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_check (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_conflict_exit (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_consistency_checks_respect_routers (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_default_merge_name (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_disabled_migrations_for_app (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_dry_run (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_dry_run_verbosity_3 (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_empty_connections (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_empty_migration (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_empty_no_app_specified (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_handle_merge (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_inconsistent_history (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_interactive_accept (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_interactive_by_default (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_interactive_reject (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_merge_dont_output_dependency_operations (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_merge_no_conflict (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_migration_path_output (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_migration_path_output_valueerror (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_migrations_announce (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_migrations_modules_nonexistent_toplevel_package (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_migrations_modules_path_not_exist 
(migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_no_apps_initial (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_no_changes (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_no_changes_no_apps (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_no_common_ancestor (migrations.test_commands.MakeMigrationsTests)\", \"Migration directories without an __init__.py file are allowed.\", \"test_makemigrations_non_interactive_no_field_rename (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_non_interactive_no_model_rename (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_non_interactive_not_null_addition (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_non_interactive_not_null_alteration (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_order (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_unspecified_app_with_conflict_merge (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_unspecified_app_with_conflict_no_merge (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_with_custom_name (migrations.test_commands.MakeMigrationsTests)\", \"test_makemigrations_with_invalid_custom_name (migrations.test_commands.MakeMigrationsTests)\", \"test_ambigious_prefix (migrations.test_commands.MigrateTests)\", \"test_app_without_migrations (migrations.test_commands.MigrateTests)\", \"test_migrate (migrations.test_commands.MigrateTests)\", \"test_migrate_conflict_exit (migrations.test_commands.MigrateTests)\", \"test_migrate_fake_initial (migrations.test_commands.MigrateTests)\", \"test_migrate_fake_split_initial (migrations.test_commands.MigrateTests)\", \"test_migrate_inconsistent_history (migrations.test_commands.MigrateTests)\", \"test_migrate_initial_false (migrations.test_commands.MigrateTests)\", \"Tests migrate --plan output.\", \"test_migrate_record_replaced (migrations.test_commands.MigrateTests)\", \"test_migrate_record_squashed (migrations.test_commands.MigrateTests)\", \"test_migrate_syncdb_app_label (migrations.test_commands.MigrateTests)\", \"test_migrate_syncdb_app_with_migrations (migrations.test_commands.MigrateTests)\", \"test_migrate_syncdb_deferred_sql_executed_with_schemaeditor (migrations.test_commands.MigrateTests)\", \"test_migrate_with_system_checks (migrations.test_commands.MigrateTests)\", \"test_regression_22823_unmigrated_fk_to_migrated_model (migrations.test_commands.MigrateTests)\", \"test_showmigrations_list (migrations.test_commands.MigrateTests)\", \"test_showmigrations_no_migrations (migrations.test_commands.MigrateTests)\", \"test_showmigrations_plan (migrations.test_commands.MigrateTests)\", \"test_showmigrations_plan_app_label_no_migrations (migrations.test_commands.MigrateTests)\", \"test_showmigrations_plan_multiple_app_labels (migrations.test_commands.MigrateTests)\", \"test_showmigrations_plan_no_migrations (migrations.test_commands.MigrateTests)\", \"test_showmigrations_plan_single_app_label (migrations.test_commands.MigrateTests)\", \"test_showmigrations_plan_squashed (migrations.test_commands.MigrateTests)\", \"test_showmigrations_unmigrated_app (migrations.test_commands.MigrateTests)\", \"test_sqlmigrate_backwards (migrations.test_commands.MigrateTests)\", \"test_sqlmigrate_for_non_atomic_migration (migrations.test_commands.MigrateTests)\", \"test_sqlmigrate_forwards (migrations.test_commands.MigrateTests)\", \"test_unknown_prefix 
(migrations.test_commands.MigrateTests)\"]", + "expected_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "test_file_spans": { + "tests/migrations/test_commands.py": [ + "MigrateTests.test_sqlmigrate_forwards", + "MigrateTests.test_sqlmigrate_backwards", + "MigrateTests.test_regression_22823_unmigrated_fk_to_migrated_model" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20231010_rag_claude2", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ], + "tests/migrations/test_commands.py": [ + "MigrateTests.test_migrate_fake_initial" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command", + "Command.add_arguments", + "Command.execute", + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command", + "Command.add_arguments", + "Command.execute", + "Command.handle" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": 
{ + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ], + "tests/migrations/test_commands.py": [ + "MigrateTests.test_regression_22823_unmigrated_fk_to_migrated_model" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "imports", + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "imports", + "Command.handle" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command", + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command", + "Command.handle" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ], + "tests/migrations/test_commands.py": [ + "MigrateTests.test_migrate_initial_false" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ], + "tests/migrations/test_commands.py": [ + "MigrateTests.test_sqlmigrate_forwards" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + 
"django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ], + "tests/migrations/test_commands.py": [ + "MigrateTests.test_regression_22823_unmigrated_fk_to_migrated_model" + ] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ], + "tests/migrations/test_commands.py": [] + }, + "alternative_spans": { + "django/core/management/commands/sqlmigrate.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "tests/migrations/test_commands.py": [ + "MigrateTests.test_regression_22823_unmigrated_fk_to_migrated_model", + "MigrateTests.test_migrate" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11049", + "repo": "django/django", + "base_commit": "17455e924e243e7a55e8a38f45966d8cbb27c273", + "problem_statement": "Correct expected format in invalid DurationField error message\nDescription\n\t\nIf you enter a duration \"14:00\" into a duration field, it translates to \"00:14:00\" which is 14 minutes.\nThe current error message for invalid DurationField says that this should be the format of durations: \"[DD] [HH:[MM:]]ss[.uuuuuu]\". But according to the actual behaviour, it should be: \"[DD] [[HH:]MM:]ss[.uuuuuu]\", because seconds are mandatory, minutes are optional, and hours are optional if minutes are provided.\nThis seems to be a mistake in all Django versions that support the DurationField.\nAlso the duration fields could have a default help_text with the requested format, because the syntax is not self-explanatory.\n", + "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -1587,7 +1587,7 @@ class DurationField(Field):\n empty_strings_allowed = False\n default_error_messages = {\n 'invalid': _(\"'%(value)s' value has an invalid format. 
It must be in \"\n- \"[DD] [HH:[MM:]]ss[.uuuuuu] format.\")\n+ \"[DD] [[HH:]MM:]ss[.uuuuuu] format.\")\n }\n description = _(\"Duration\")\n \n", + "test_patch": "diff --git a/tests/model_fields/test_durationfield.py b/tests/model_fields/test_durationfield.py\n--- a/tests/model_fields/test_durationfield.py\n+++ b/tests/model_fields/test_durationfield.py\n@@ -75,7 +75,7 @@ def test_invalid_string(self):\n self.assertEqual(\n cm.exception.message % cm.exception.params,\n \"'not a datetime' value has an invalid format. \"\n- \"It must be in [DD] [HH:[MM:]]ss[.uuuuuu] format.\"\n+ \"It must be in [DD] [[HH:]MM:]ss[.uuuuuu] format.\"\n )\n \n \n", + "fail_to_pass": "[\"test_invalid_string (model_fields.test_durationfield.TestValidation)\"]", + "pass_to_pass": "[\"test_dumping (model_fields.test_durationfield.TestSerialization)\", \"test_loading (model_fields.test_durationfield.TestSerialization)\", \"test_formfield (model_fields.test_durationfield.TestFormField)\", \"test_exact (model_fields.test_durationfield.TestQuerying)\", \"test_gt (model_fields.test_durationfield.TestQuerying)\", \"test_create_empty (model_fields.test_durationfield.TestSaveLoad)\", \"test_fractional_seconds (model_fields.test_durationfield.TestSaveLoad)\", \"test_simple_roundtrip (model_fields.test_durationfield.TestSaveLoad)\"]", + "expected_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "test_file_spans": { + "tests/model_fields/test_durationfield.py": [ + "TestValidation.test_invalid_string" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoField.__init__", + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoField.__init__", + "DurationField" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ], + "tests/model_fields/test_durationfield.py": [ + "imports", + "TestValidation.test_invalid_string" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField", + "DurationField.formfield" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField", + "DurationField.formfield" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + 
"django/db/models/fields/__init__.py": [ + "DurationField", + "DurationField.formfield" + ], + "django/forms/fields.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField", + "DurationField.formfield" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField", + "DurationField.formfield" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField", + "DurationField.formfield" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ], + "tests/model_fields/test_durationfield.py": [ + "TestValidation.test_invalid_string" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField", + "DurationField.formfield" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField", + "DurationField.formfield" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ], + "django/forms/fields.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "DurationField" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + 
"DurationField" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-13439", + "repo": "scikit-learn/scikit-learn", + "base_commit": "a62775e99f2a5ea3d51db7160fad783f6cd8a4c5", + "problem_statement": "Pipeline should implement __len__\n#### Description\r\n\r\nWith the new indexing support `pipe[:len(pipe)]` raises an error.\r\n\r\n#### Steps/Code to Reproduce\r\n\r\n```python\r\nfrom sklearn import svm\r\nfrom sklearn.datasets import samples_generator\r\nfrom sklearn.feature_selection import SelectKBest\r\nfrom sklearn.feature_selection import f_regression\r\nfrom sklearn.pipeline import Pipeline\r\n\r\n# generate some data to play with\r\nX, y = samples_generator.make_classification(\r\n n_informative=5, n_redundant=0, random_state=42)\r\n\r\nanova_filter = SelectKBest(f_regression, k=5)\r\nclf = svm.SVC(kernel='linear')\r\npipe = Pipeline([('anova', anova_filter), ('svc', clf)])\r\n\r\nlen(pipe)\r\n```\r\n\r\n#### Versions\r\n\r\n```\r\nSystem:\r\n python: 3.6.7 | packaged by conda-forge | (default, Feb 19 2019, 18:37:23) [GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)]\r\nexecutable: /Users/krisz/.conda/envs/arrow36/bin/python\r\n machine: Darwin-18.2.0-x86_64-i386-64bit\r\n\r\nBLAS:\r\n macros: HAVE_CBLAS=None\r\n lib_dirs: /Users/krisz/.conda/envs/arrow36/lib\r\ncblas_libs: openblas, openblas\r\n\r\nPython deps:\r\n pip: 19.0.3\r\nsetuptools: 40.8.0\r\n sklearn: 0.21.dev0\r\n numpy: 1.16.2\r\n scipy: 1.2.1\r\n Cython: 0.29.6\r\n pandas: 0.24.1\r\n```\n", + "golden_patch": "diff --git a/sklearn/pipeline.py b/sklearn/pipeline.py\n--- a/sklearn/pipeline.py\n+++ b/sklearn/pipeline.py\n@@ -199,6 +199,12 @@ def _iter(self, with_final=True):\n if trans is not None and trans != 'passthrough':\n yield idx, name, trans\n \n+ def __len__(self):\n+ \"\"\"\n+ Returns the length of the Pipeline\n+ \"\"\"\n+ return len(self.steps)\n+\n def __getitem__(self, ind):\n \"\"\"Returns a sub-pipeline or a single esimtator in the pipeline\n \n", + "test_patch": "diff --git a/sklearn/tests/test_pipeline.py b/sklearn/tests/test_pipeline.py\n--- a/sklearn/tests/test_pipeline.py\n+++ b/sklearn/tests/test_pipeline.py\n@@ -1069,5 +1069,6 @@ def test_make_pipeline_memory():\n assert pipeline.memory is memory\n pipeline = make_pipeline(DummyTransf(), SVC())\n assert pipeline.memory is None\n+ assert len(pipeline) == 2\n \n shutil.rmtree(cachedir)\n", + "fail_to_pass": "[\"sklearn/tests/test_pipeline.py::test_make_pipeline_memory\"]", + "pass_to_pass": "[\"sklearn/tests/test_pipeline.py::test_pipeline_init\", \"sklearn/tests/test_pipeline.py::test_pipeline_init_tuple\", \"sklearn/tests/test_pipeline.py::test_pipeline_methods_anova\", \"sklearn/tests/test_pipeline.py::test_pipeline_fit_params\", \"sklearn/tests/test_pipeline.py::test_pipeline_sample_weight_supported\", \"sklearn/tests/test_pipeline.py::test_pipeline_sample_weight_unsupported\", \"sklearn/tests/test_pipeline.py::test_pipeline_raise_set_params_error\", \"sklearn/tests/test_pipeline.py::test_pipeline_methods_pca_svm\", \"sklearn/tests/test_pipeline.py::test_pipeline_methods_preprocessing_svm\", \"sklearn/tests/test_pipeline.py::test_fit_predict_on_pipeline\", \"sklearn/tests/test_pipeline.py::test_fit_predict_on_pipeline_without_fit_predict\", \"sklearn/tests/test_pipeline.py::test_fit_predict_with_intermediate_fit_params\", \"sklearn/tests/test_pipeline.py::test_predict_with_predict_params\", \"sklearn/tests/test_pipeline.py::test_feature_union\", 
\"sklearn/tests/test_pipeline.py::test_make_union\", \"sklearn/tests/test_pipeline.py::test_make_union_kwargs\", \"sklearn/tests/test_pipeline.py::test_pipeline_transform\", \"sklearn/tests/test_pipeline.py::test_pipeline_fit_transform\", \"sklearn/tests/test_pipeline.py::test_pipeline_slice\", \"sklearn/tests/test_pipeline.py::test_pipeline_index\", \"sklearn/tests/test_pipeline.py::test_set_pipeline_steps\", \"sklearn/tests/test_pipeline.py::test_pipeline_named_steps\", \"sklearn/tests/test_pipeline.py::test_pipeline_correctly_adjusts_steps[None]\", \"sklearn/tests/test_pipeline.py::test_pipeline_correctly_adjusts_steps[passthrough]\", \"sklearn/tests/test_pipeline.py::test_set_pipeline_step_passthrough[None]\", \"sklearn/tests/test_pipeline.py::test_set_pipeline_step_passthrough[passthrough]\", \"sklearn/tests/test_pipeline.py::test_pipeline_ducktyping\", \"sklearn/tests/test_pipeline.py::test_make_pipeline\", \"sklearn/tests/test_pipeline.py::test_feature_union_weights\", \"sklearn/tests/test_pipeline.py::test_feature_union_parallel\", \"sklearn/tests/test_pipeline.py::test_feature_union_feature_names\", \"sklearn/tests/test_pipeline.py::test_classes_property\", \"sklearn/tests/test_pipeline.py::test_set_feature_union_steps\", \"sklearn/tests/test_pipeline.py::test_set_feature_union_step_drop[drop]\", \"sklearn/tests/test_pipeline.py::test_set_feature_union_step_drop[None]\", \"sklearn/tests/test_pipeline.py::test_step_name_validation\", \"sklearn/tests/test_pipeline.py::test_set_params_nested_pipeline\", \"sklearn/tests/test_pipeline.py::test_pipeline_wrong_memory\", \"sklearn/tests/test_pipeline.py::test_pipeline_with_cache_attribute\", \"sklearn/tests/test_pipeline.py::test_pipeline_memory\"]", + "expected_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__getitem__" + ] + }, + "test_file_spans": { + "sklearn/tests/test_pipeline.py": [ + "test_make_pipeline_memory" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__getitem__" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__getitem__" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "name": "20231010_rag_claude2", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__getitem__" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__getitem__" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + 
"sklearn/pipeline.py": [ + "Pipeline" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__getitem__" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__getitem__" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__", + "Pipeline._validate_steps" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__", + "Pipeline._validate_steps" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.get_params" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.get_params" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__getitem__" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__getitem__" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.get_params" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.get_params" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "name": "20240402_rag_claude3opus", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__", + "Pipeline.get_params" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__", + "Pipeline.get_params" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + 
"Pipeline._estimator_type" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ], + "sklearn/tests/test_pipeline.py": [ + "test_set_pipeline_steps" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sklearn/pipeline.py": [ + "Pipeline.named_steps" + ] + }, + "alternative_spans": { + "sklearn/pipeline.py": [ + "Pipeline.named_steps" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "run_name": "20240630_agentless_gpt4o", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "run_name": "20231010_rag_claude2", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "run_name": "20240706_sima_gpt4o", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "run_name": "20240623_moatless_claude35sonnet", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "run_name": "20240820_honeycomb", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__" + ] + } + }, + { + "run_name": "20240908_infant_gpt4o", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__", + "Pipeline._validate_steps" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "run_name": "20240604_CodeR", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "run_name": "20241016_IBM-SWE-1.0", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline.get_params" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline.get_params" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "run_name": "20240402_rag_claude3opus", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline" + ] + } + }, + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline.__init__", + "Pipeline.get_params" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline._estimator_type" + ] + } + }, + { + "run_name": "20240721_amazon-q-developer-agent-20240719-dev", + "spans": { + "sklearn/pipeline.py": [ + "Pipeline.named_steps" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-16281", + "repo": "sympy/sympy", + "base_commit": "41490b75f3621408e0468b0e7b6dc409601fc6ff", + "problem_statement": "Product pretty print could be improved\nThis is what the 
pretty printing for `Product` looks like:\r\n\r\n```\r\n>>> pprint(Product(1, (n, 1, oo)))\r\n \u221e\r\n\u252c\u2500\u2500\u2500\u252c\r\n\u2502 \u2502 1\r\n\u2502 \u2502\r\nn = 1\r\n>>> pprint(Product(1/n, (n, 1, oo)))\r\n \u221e\r\n\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u252c\r\n\u2502 \u2502 1\r\n\u2502 \u2502 \u2500\r\n\u2502 \u2502 n\r\n\u2502 \u2502\r\n n = 1\r\n>>> pprint(Product(1/n**2, (n, 1, oo)))\r\n \u221e\r\n\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\r\n\u2502 \u2502 1\r\n\u2502 \u2502 \u2500\u2500\r\n\u2502 \u2502 2\r\n\u2502 \u2502 n\r\n\u2502 \u2502\r\n n = 1\r\n>>> pprint(Product(1, (n, 1, oo)), use_unicode=False)\r\n oo\r\n_____\r\n| | 1\r\n| |\r\nn = 1\r\n>>> pprint(Product(1/n, (n, 1, oo)), use_unicode=False)\r\n oo\r\n________\r\n| | 1\r\n| | -\r\n| | n\r\n| |\r\n n = 1\r\n>>> pprint(Product(1/n**2, (n, 1, oo)), use_unicode=False)\r\n oo\r\n__________\r\n| | 1\r\n| | --\r\n| | 2\r\n| | n\r\n| |\r\n n = 1\r\n```\r\n\r\n(if those don't look good in your browser copy paste them into the terminal)\r\n\r\nThis could be improved:\r\n\r\n- Why is there always an empty line at the bottom of the \u220f? Keeping everything below the horizontal line is good, but the bottom looks asymmetric, and it makes the \u220f bigger than it needs to be.\r\n\r\n- The \u220f is too fat IMO. \r\n\r\n- It might look better if we extended the top bar. I'm unsure about this. \r\n\r\nCompare this\r\n\r\n```\r\n \u221e\r\n\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u252c\u2500\r\n \u2502 \u2502 1\r\n \u2502 \u2502 \u2500\u2500\r\n \u2502 \u2502 2\r\n \u2502 \u2502 n\r\n n = 1\r\n```\r\n\r\nThat's still almost twice as wide as the equivalent Sum, but if you make it much skinnier it starts to look bad.\r\n\r\n```\r\n \u221e\r\n ____\r\n \u2572\r\n \u2572 1\r\n \u2572 \u2500\u2500\r\n \u2571 2\r\n \u2571 n\r\n \u2571\r\n \u203e\u203e\u203e\u203e\r\nn = 1\r\n```\n", + "golden_patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -491,10 +491,9 @@ def _print_Product(self, expr):\n \n for lim in expr.limits:\n width = (func_height + 2) * 5 // 3 - 2\n- sign_lines = []\n- sign_lines.append(corner_chr + (horizontal_chr*width) + corner_chr)\n- for i in range(func_height + 1):\n- sign_lines.append(vertical_chr + (' '*width) + vertical_chr)\n+ sign_lines = [horizontal_chr + corner_chr + (horizontal_chr * (width-2)) + corner_chr + horizontal_chr]\n+ for _ in range(func_height + 1):\n+ sign_lines.append(' ' + vertical_chr + (' ' * (width-2)) + vertical_chr + ' ')\n \n pretty_sign = stringPict('')\n pretty_sign = prettyForm(*pretty_sign.stack(*sign_lines))\n", + "test_patch": "diff --git a/sympy/printing/pretty/tests/test_pretty.py b/sympy/printing/pretty/tests/test_pretty.py\n--- a/sympy/printing/pretty/tests/test_pretty.py\n+++ b/sympy/printing/pretty/tests/test_pretty.py\n@@ -2054,51 +2054,48 @@ def test_pretty_product():\n unicode_str = \\\n u(\"\"\"\\\n l \\n\\\n-\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c \\n\\\n-\u2502 \u2502 \u239b 2\u239e\\n\\\n-\u2502 \u2502 \u239cn \u239f\\n\\\n-\u2502 \u2502 f\u239c\u2500\u2500\u239f\\n\\\n-\u2502 \u2502 \u239d9 \u23a0\\n\\\n-\u2502 \u2502 \\n\\\n+\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500 \\n\\\n+ \u2502 \u2502 \u239b 2\u239e\\n\\\n+ \u2502 \u2502 \u239cn \u239f\\n\\\n+ \u2502 \u2502 f\u239c\u2500\u2500\u239f\\n\\\n+ \u2502 \u2502 \u239d9 \u23a0\\n\\\n+ \u2502 \u2502 \\n\\\n 2 \\n\\\n n = k 
\"\"\")\n ascii_str = \\\n \"\"\"\\\n l \\n\\\n __________ \\n\\\n-| | / 2\\\\\\n\\\n-| | |n |\\n\\\n-| | f|--|\\n\\\n-| | \\\\9 /\\n\\\n-| | \\n\\\n+ | | / 2\\\\\\n\\\n+ | | |n |\\n\\\n+ | | f|--|\\n\\\n+ | | \\\\9 /\\n\\\n+ | | \\n\\\n 2 \\n\\\n n = k \"\"\"\n \n- assert pretty(expr) == ascii_str\n- assert upretty(expr) == unicode_str\n-\n expr = Product(f((n/3)**2), (n, k**2, l), (l, 1, m))\n \n unicode_str = \\\n u(\"\"\"\\\n m l \\n\\\n-\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c \u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c \\n\\\n-\u2502 \u2502 \u2502 \u2502 \u239b 2\u239e\\n\\\n-\u2502 \u2502 \u2502 \u2502 \u239cn \u239f\\n\\\n-\u2502 \u2502 \u2502 \u2502 f\u239c\u2500\u2500\u239f\\n\\\n-\u2502 \u2502 \u2502 \u2502 \u239d9 \u23a0\\n\\\n-\u2502 \u2502 \u2502 \u2502 \\n\\\n+\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500 \u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500 \\n\\\n+ \u2502 \u2502 \u2502 \u2502 \u239b 2\u239e\\n\\\n+ \u2502 \u2502 \u2502 \u2502 \u239cn \u239f\\n\\\n+ \u2502 \u2502 \u2502 \u2502 f\u239c\u2500\u2500\u239f\\n\\\n+ \u2502 \u2502 \u2502 \u2502 \u239d9 \u23a0\\n\\\n+ \u2502 \u2502 \u2502 \u2502 \\n\\\n l = 1 2 \\n\\\n n = k \"\"\")\n ascii_str = \\\n \"\"\"\\\n m l \\n\\\n __________ __________ \\n\\\n-| | | | / 2\\\\\\n\\\n-| | | | |n |\\n\\\n-| | | | f|--|\\n\\\n-| | | | \\\\9 /\\n\\\n-| | | | \\n\\\n+ | | | | / 2\\\\\\n\\\n+ | | | | |n |\\n\\\n+ | | | | f|--|\\n\\\n+ | | | | \\\\9 /\\n\\\n+ | | | | \\n\\\n l = 1 2 \\n\\\n n = k \"\"\"\n \n@@ -5514,19 +5511,19 @@ def test_issue_6359():\n 2\n / 2 \\\\ \\n\\\n |______ | \\n\\\n-|| | 2| \\n\\\n-|| | x | \\n\\\n-|| | | \\n\\\n+| | | 2| \\n\\\n+| | | x | \\n\\\n+| | | | \\n\\\n \\\\x = 1 / \\\n \"\"\"\n assert upretty(Product(x**2, (x, 1, 2))**2) == \\\n u(\"\"\"\\\n 2\n \u239b 2 \u239e \\n\\\n-\u239c\u252c\u2500\u2500\u2500\u2500\u252c \u239f \\n\\\n-\u239c\u2502 \u2502 2\u239f \\n\\\n-\u239c\u2502 \u2502 x \u239f \\n\\\n-\u239c\u2502 \u2502 \u239f \\n\\\n+\u239c\u2500\u252c\u2500\u2500\u252c\u2500 \u239f \\n\\\n+\u239c \u2502 \u2502 2\u239f \\n\\\n+\u239c \u2502 \u2502 x \u239f \\n\\\n+\u239c \u2502 \u2502 \u239f \\n\\\n \u239dx = 1 \u23a0 \\\n \"\"\")\n \n", + "fail_to_pass": "[\"test_pretty_product\", \"test_issue_6359\"]", + "pass_to_pass": "[\"test_pretty_ascii_str\", \"test_pretty_unicode_str\", \"test_upretty_greek\", \"test_upretty_multiindex\", \"test_upretty_sub_super\", \"test_upretty_subs_missing_in_24\", \"test_missing_in_2X_issue_9047\", \"test_upretty_modifiers\", \"test_pretty_Cycle\", \"test_pretty_basic\", \"test_negative_fractions\", \"test_issue_5524\", \"test_pretty_ordering\", \"test_EulerGamma\", \"test_GoldenRatio\", \"test_pretty_relational\", \"test_Assignment\", \"test_AugmentedAssignment\", \"test_issue_7117\", \"test_pretty_rational\", \"test_pretty_functions\", \"test_pretty_sqrt\", \"test_pretty_sqrt_char_knob\", \"test_pretty_sqrt_longsymbol_no_sqrt_char\", \"test_pretty_KroneckerDelta\", \"test_pretty_lambda\", \"test_pretty_order\", \"test_pretty_derivatives\", \"test_pretty_integrals\", \"test_pretty_matrix\", \"test_pretty_ndim_arrays\", \"test_tensor_TensorProduct\", \"test_diffgeom_print_WedgeProduct\", \"test_Adjoint\", \"test_pretty_Trace_issue_9044\", \"test_MatrixExpressions\", \"test_pretty_dotproduct\", \"test_pretty_piecewise\", \"test_pretty_ITE\", \"test_pretty_seq\", \"test_any_object_in_sequence\", \"test_print_builtin_set\", \"test_pretty_sets\", \"test_pretty_SetExpr\", \"test_pretty_ImageSet\", 
\"test_pretty_ConditionSet\", \"test_pretty_ComplexRegion\", \"test_pretty_Union_issue_10414\", \"test_pretty_Intersection_issue_10414\", \"test_ProductSet_paranthesis\", \"test_ProductSet_prod_char_issue_10413\", \"test_pretty_sequences\", \"test_pretty_FourierSeries\", \"test_pretty_FormalPowerSeries\", \"test_pretty_limits\", \"test_pretty_ComplexRootOf\", \"test_pretty_RootSum\", \"test_GroebnerBasis\", \"test_pretty_Boolean\", \"test_pretty_Domain\", \"test_pretty_prec\", \"test_pprint\", \"test_pretty_class\", \"test_pretty_no_wrap_line\", \"test_settings\", \"test_pretty_sum\", \"test_units\", \"test_pretty_Subs\", \"test_gammas\", \"test_beta\", \"test_function_subclass_different_name\", \"test_SingularityFunction\", \"test_deltas\", \"test_hyper\", \"test_meijerg\", \"test_noncommutative\", \"test_pretty_special_functions\", \"test_pretty_geometry\", \"test_expint\", \"test_elliptic_functions\", \"test_RandomDomain\", \"test_PrettyPoly\", \"test_issue_6285\", \"test_issue_6739\", \"test_complicated_symbol_unchanged\", \"test_categories\", \"test_PrettyModules\", \"test_QuotientRing\", \"test_Homomorphism\", \"test_Tr\", \"test_pretty_Add\", \"test_issue_7179\", \"test_issue_7180\", \"test_pretty_Complement\", \"test_pretty_SymmetricDifference\", \"test_pretty_Contains\", \"test_issue_4335\", \"test_issue_6324\", \"test_issue_7927\", \"test_issue_6134\", \"test_issue_9877\", \"test_issue_13651\", \"test_pretty_primenu\", \"test_pretty_primeomega\", \"test_pretty_Mod\", \"test_issue_11801\", \"test_pretty_UnevaluatedExpr\", \"test_issue_10472\", \"test_MatrixElement_printing\", \"test_issue_12675\", \"test_MatrixSymbol_printing\", \"test_degree_printing\", \"test_vector_expr_pretty_printing\", \"test_pretty_print_tensor_expr\", \"test_pretty_print_tensor_partial_deriv\", \"test_issue_15560\", \"test_print_lerchphi\", \"test_issue_15583\", \"test_matrixSymbolBold\", \"test_center_accent\"]", + "expected_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_Product" + ] + }, + "test_file_spans": { + "sympy/printing/pretty/tests/test_pretty.py": [ + "test_pretty_product", + "test_issue_6359" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11099", + "repo": "django/django", + "base_commit": "d26b2424437dabeeca94d7900b37d2df4410da0c", + "problem_statement": "UsernameValidator allows trailing newline in usernames\nDescription\n\t\nASCIIUsernameValidator and UnicodeUsernameValidator use the regex \nr'^[\\w.@+-]+$'\nThe intent is to only allow alphanumeric characters as well as ., @, +, and -. However, a little known quirk of Python regexes is that $ will also match a trailing newline. Therefore, the user name validators will accept usernames which end with a newline. You can avoid this behavior by instead using \\A and \\Z to terminate regexes. For example, the validator regex could be changed to\nr'\\A[\\w.@+-]+\\Z'\nin order to reject usernames that end with a newline.\nI am not sure how to officially post a patch, but the required change is trivial - using the regex above in the two validators in contrib.auth.validators.\n", + "golden_patch": "diff --git a/django/contrib/auth/validators.py b/django/contrib/auth/validators.py\n--- a/django/contrib/auth/validators.py\n+++ b/django/contrib/auth/validators.py\n@@ -7,7 +7,7 @@\n \n @deconstructible\n class ASCIIUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'^[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. 
This value may contain only English letters, '\n 'numbers, and @/./+/-/_ characters.'\n@@ -17,7 +17,7 @@ class ASCIIUsernameValidator(validators.RegexValidator):\n \n @deconstructible\n class UnicodeUsernameValidator(validators.RegexValidator):\n- regex = r'^[\\w.@+-]+$'\n+ regex = r'^[\\w.@+-]+\\Z'\n message = _(\n 'Enter a valid username. This value may contain only letters, '\n 'numbers, and @/./+/-/_ characters.'\n", + "test_patch": "diff --git a/tests/auth_tests/test_validators.py b/tests/auth_tests/test_validators.py\n--- a/tests/auth_tests/test_validators.py\n+++ b/tests/auth_tests/test_validators.py\n@@ -237,7 +237,7 @@ def test_unicode_validator(self):\n invalid_usernames = [\n \"o'connell\", \"\u0639\u0628\u062f \u0627\u0644\",\n \"zerowidth\\u200Bspace\", \"nonbreaking\\u00A0space\",\n- \"en\\u2013dash\",\n+ \"en\\u2013dash\", 'trailingnewline\\u000A',\n ]\n v = validators.UnicodeUsernameValidator()\n for valid in valid_usernames:\n@@ -250,7 +250,7 @@ def test_unicode_validator(self):\n \n def test_ascii_validator(self):\n valid_usernames = ['glenn', 'GLEnN', 'jean-marc']\n- invalid_usernames = [\"o'connell\", '\u00c9ric', 'jean marc', \"\u0623\u062d\u0645\u062f\"]\n+ invalid_usernames = [\"o'connell\", '\u00c9ric', 'jean marc', \"\u0623\u062d\u0645\u062f\", 'trailingnewline\\n']\n v = validators.ASCIIUsernameValidator()\n for valid in valid_usernames:\n with self.subTest(valid=valid):\n", + "fail_to_pass": "[\"test_ascii_validator (auth_tests.test_validators.UsernameValidatorsTests)\", \"test_unicode_validator (auth_tests.test_validators.UsernameValidatorsTests)\", \"test_help_text (auth_tests.test_validators.UserAttributeSimilarityValidatorTest)\"]", + "pass_to_pass": "[\"test_help_text (auth_tests.test_validators.MinimumLengthValidatorTest)\", \"test_validate (auth_tests.test_validators.MinimumLengthValidatorTest)\", \"test_help_text (auth_tests.test_validators.NumericPasswordValidatorTest)\", \"test_validate (auth_tests.test_validators.NumericPasswordValidatorTest)\", \"test_validate (auth_tests.test_validators.UserAttributeSimilarityValidatorTest)\", \"test_validate_property (auth_tests.test_validators.UserAttributeSimilarityValidatorTest)\", \"test_empty_password_validator_help_text_html (auth_tests.test_validators.PasswordValidationTest)\", \"test_get_default_password_validators (auth_tests.test_validators.PasswordValidationTest)\", \"test_get_password_validators_custom (auth_tests.test_validators.PasswordValidationTest)\", \"test_password_changed (auth_tests.test_validators.PasswordValidationTest)\", \"test_password_changed_with_custom_validator (auth_tests.test_validators.PasswordValidationTest)\", \"test_password_validators_help_text_html (auth_tests.test_validators.PasswordValidationTest)\", \"test_password_validators_help_text_html_escaping (auth_tests.test_validators.PasswordValidationTest)\", \"test_password_validators_help_texts (auth_tests.test_validators.PasswordValidationTest)\", \"test_validate_password (auth_tests.test_validators.PasswordValidationTest)\", \"test_help_text (auth_tests.test_validators.CommonPasswordValidatorTest)\", \"test_validate (auth_tests.test_validators.CommonPasswordValidatorTest)\", \"test_validate_custom_list (auth_tests.test_validators.CommonPasswordValidatorTest)\", \"test_validate_django_supplied_file (auth_tests.test_validators.CommonPasswordValidatorTest)\"]", + "expected_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "test_file_spans": { + 
"tests/auth_tests/test_validators.py": [ + "UsernameValidatorsTests.test_unicode_validator", + "UsernameValidatorsTests.test_ascii_validator" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20231010_rag_claude2", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + 
"django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + 
"name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240402_rag_claude3opus", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "imports", + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ], + "tests/forms_tests/tests/test_validators.py": [] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + 
"updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + }, + "alternative_spans": { + "django/contrib/auth/validators.py": [ + "ASCIIUsernameValidator", + "UnicodeUsernameValidator" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-13496", + "repo": "scikit-learn/scikit-learn", + "base_commit": "3aefc834dce72e850bff48689bea3c7dff5f3fad", + "problem_statement": "Expose warm_start in Isolation forest\nIt seems to me that `sklearn.ensemble.IsolationForest` supports incremental addition of new trees with the `warm_start` parameter of its parent class, `sklearn.ensemble.BaseBagging`.\r\n\r\nEven though this parameter is not exposed in `__init__()` , it gets inherited from `BaseBagging` and one can use it by changing it to `True` after initialization. To make it work, you have to also increment `n_estimators` on every iteration. \r\n\r\nIt took me a while to notice that it actually works, and I had to inspect the source code of both `IsolationForest` and `BaseBagging`. Also, it looks to me that the behavior is in-line with `sklearn.ensemble.BaseForest` that is behind e.g. `sklearn.ensemble.RandomForestClassifier`.\r\n\r\nTo make it more easier to use, I'd suggest to:\r\n* expose `warm_start` in `IsolationForest.__init__()`, default `False`;\r\n* document it in the same way as it is documented for `RandomForestClassifier`, i.e. say:\r\n```py\r\n warm_start : bool, optional (default=False)\r\n When set to ``True``, reuse the solution of the previous call to fit\r\n and add more estimators to the ensemble, otherwise, just fit a whole\r\n new forest. See :term:`the Glossary `.\r\n```\r\n* add a test to make sure it works properly;\r\n* possibly also mention in the \"IsolationForest example\" documentation entry;\r\n\n", + "golden_patch": "diff --git a/sklearn/ensemble/iforest.py b/sklearn/ensemble/iforest.py\n--- a/sklearn/ensemble/iforest.py\n+++ b/sklearn/ensemble/iforest.py\n@@ -120,6 +120,12 @@ class IsolationForest(BaseBagging, OutlierMixin):\n verbose : int, optional (default=0)\n Controls the verbosity of the tree building process.\n \n+ warm_start : bool, optional (default=False)\n+ When set to ``True``, reuse the solution of the previous call to fit\n+ and add more estimators to the ensemble, otherwise, just fit a whole\n+ new forest. See :term:`the Glossary `.\n+\n+ .. 
versionadded:: 0.21\n \n Attributes\n ----------\n@@ -173,7 +179,8 @@ def __init__(self,\n n_jobs=None,\n behaviour='old',\n random_state=None,\n- verbose=0):\n+ verbose=0,\n+ warm_start=False):\n super().__init__(\n base_estimator=ExtraTreeRegressor(\n max_features=1,\n@@ -185,6 +192,7 @@ def __init__(self,\n n_estimators=n_estimators,\n max_samples=max_samples,\n max_features=max_features,\n+ warm_start=warm_start,\n n_jobs=n_jobs,\n random_state=random_state,\n verbose=verbose)\n", + "test_patch": "diff --git a/sklearn/ensemble/tests/test_iforest.py b/sklearn/ensemble/tests/test_iforest.py\n--- a/sklearn/ensemble/tests/test_iforest.py\n+++ b/sklearn/ensemble/tests/test_iforest.py\n@@ -295,6 +295,28 @@ def test_score_samples():\n clf2.score_samples([[2., 2.]]))\n \n \n+@pytest.mark.filterwarnings('ignore:default contamination')\n+@pytest.mark.filterwarnings('ignore:behaviour=\"old\"')\n+def test_iforest_warm_start():\n+ \"\"\"Test iterative addition of iTrees to an iForest \"\"\"\n+\n+ rng = check_random_state(0)\n+ X = rng.randn(20, 2)\n+\n+ # fit first 10 trees\n+ clf = IsolationForest(n_estimators=10, max_samples=20,\n+ random_state=rng, warm_start=True)\n+ clf.fit(X)\n+ # remember the 1st tree\n+ tree_1 = clf.estimators_[0]\n+ # fit another 10 trees\n+ clf.set_params(n_estimators=20)\n+ clf.fit(X)\n+ # expecting 20 fitted trees and no overwritten trees\n+ assert len(clf.estimators_) == 20\n+ assert clf.estimators_[0] is tree_1\n+\n+\n @pytest.mark.filterwarnings('ignore:default contamination')\n @pytest.mark.filterwarnings('ignore:behaviour=\"old\"')\n def test_deprecation():\n", + "fail_to_pass": "[\"sklearn/ensemble/tests/test_iforest.py::test_iforest_warm_start\"]", + "pass_to_pass": "[\"sklearn/ensemble/tests/test_iforest.py::test_iforest\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_sparse\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_error\", \"sklearn/ensemble/tests/test_iforest.py::test_recalculate_max_depth\", \"sklearn/ensemble/tests/test_iforest.py::test_max_samples_attribute\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_parallel_regression\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_performance\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_works[0.25]\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_works[auto]\", \"sklearn/ensemble/tests/test_iforest.py::test_max_samples_consistency\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_subsampled_features\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_average_path_length\", \"sklearn/ensemble/tests/test_iforest.py::test_score_samples\", \"sklearn/ensemble/tests/test_iforest.py::test_deprecation\", \"sklearn/ensemble/tests/test_iforest.py::test_behaviour_param\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_chunks_works1[0.25-3]\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_chunks_works1[auto-2]\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_chunks_works2[0.25-3]\", \"sklearn/ensemble/tests/test_iforest.py::test_iforest_chunks_works2[auto-2]\"]", + "expected_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "test_file_spans": { + "sklearn/ensemble/tests/test_iforest.py": [ + "test_deprecation" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + 
"IsolationForest.__init__" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "examples/ensemble/plot_isolation_forest.py": [ + "imports" + ], + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ], + "sklearn/ensemble/tests/test_iforest.py": [] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": { + "examples/ensemble/plot_isolation_forest.py": [ + "docstring" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "docstring", + "imports", + "IsolationForest.__init__", + "IsolationForest._set_oob_score", + "IsolationForest._parallel_args", + "IsolationForest.fit", + "IsolationForest.predict", + "IsolationForest.decision_function", + "IsolationForest.score_samples", + "IsolationForest.threshold_", + "IsolationForest._compute_chunked_score_samples", + "IsolationForest._compute_score_samples", + "_average_path_length" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "docstring", + "imports", + "IsolationForest.__init__", + "IsolationForest._set_oob_score", + "IsolationForest._parallel_args", + "IsolationForest.fit", + "IsolationForest.predict", + "IsolationForest.decision_function", + "IsolationForest.score_samples", + "IsolationForest.threshold_", + "IsolationForest._compute_chunked_score_samples", + "IsolationForest._compute_score_samples", + "_average_path_length" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__", + "IsolationForest._set_oob_score" + ], + "sklearn/ensemble/tests/test_iforest.py": [] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__", + "IsolationForest._set_oob_score" + ] + } + }, + { + "name": 
"20240808_RepoGraph_gpt4o", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ], + "sklearn/ensemble/tests/test_iforest.py": [ + "test_iforest_chunks_works2" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ], + "sklearn/ensemble/tests/test_iforest.py": [ + "test_iforest_chunks_works2" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + 
"IsolationForest.__init__" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "examples/ensemble/plot_isolation_forest.py": [ + "imports" + ], + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ], + "sklearn/ensemble/tests/test_iforest.py": [] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ], + "sklearn/ensemble/tests/test_iforest.py": [] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__", + "IsolationForest.decision_function" + ] + }, + "alternative_spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest", + "IsolationForest.__init__", + "IsolationForest.decision_function" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240530_autocoderover-v20240408", + "spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "run_name": "20240706_sima_gpt4o", + "spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "run_name": "20240623_moatless_claude35sonnet", + "spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "run_name": "20240806_SuperCoder2.0", + "spans": { + "sklearn/ensemble/iforest.py": [ + "docstring", + "imports", + "IsolationForest.__init__", + "IsolationForest._set_oob_score", + "IsolationForest._parallel_args", + "IsolationForest.fit", + "IsolationForest.predict", + "IsolationForest.decision_function", + "IsolationForest.score_samples", + "IsolationForest.threshold_", + "IsolationForest._compute_chunked_score_samples", + "IsolationForest._compute_score_samples", + "_average_path_length" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "run_name": "20241016_IBM-SWE-1.0", + "spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "sklearn/ensemble/iforest.py": [ + "IsolationForest.__init__" + ] + } + } + ] + }, + { + "instance_id": "scikit-learn__scikit-learn-13497", + "repo": "scikit-learn/scikit-learn", + "base_commit": "26f690961a52946dd2f53bf0fdd4264b2ae5be90", + "problem_statement": "Comparing string to array in _estimate_mi\nIn ``_estimate_mi`` there is ``discrete_features == 'auto'`` but discrete features can be an array of indices or a boolean mask.\r\nThis will error in future versions of numpy.\r\nAlso this means we never test this function with discrete features != 'auto', it seems?\n", + "golden_patch": "diff --git a/sklearn/feature_selection/mutual_info_.py b/sklearn/feature_selection/mutual_info_.py\n--- a/sklearn/feature_selection/mutual_info_.py\n+++ b/sklearn/feature_selection/mutual_info_.py\n@@ -10,7 +10,7 @@\n from ..preprocessing import scale\n from ..utils import 
check_random_state\n from ..utils.fixes import _astype_copy_false\n-from ..utils.validation import check_X_y\n+from ..utils.validation import check_array, check_X_y\n from ..utils.multiclass import check_classification_targets\n \n \n@@ -247,14 +247,16 @@ def _estimate_mi(X, y, discrete_features='auto', discrete_target=False,\n X, y = check_X_y(X, y, accept_sparse='csc', y_numeric=not discrete_target)\n n_samples, n_features = X.shape\n \n- if discrete_features == 'auto':\n- discrete_features = issparse(X)\n-\n- if isinstance(discrete_features, bool):\n+ if isinstance(discrete_features, (str, bool)):\n+ if isinstance(discrete_features, str):\n+ if discrete_features == 'auto':\n+ discrete_features = issparse(X)\n+ else:\n+ raise ValueError(\"Invalid string value for discrete_features.\")\n discrete_mask = np.empty(n_features, dtype=bool)\n discrete_mask.fill(discrete_features)\n else:\n- discrete_features = np.asarray(discrete_features)\n+ discrete_features = check_array(discrete_features, ensure_2d=False)\n if discrete_features.dtype != 'bool':\n discrete_mask = np.zeros(n_features, dtype=bool)\n discrete_mask[discrete_features] = True\n", + "test_patch": "diff --git a/sklearn/feature_selection/tests/test_mutual_info.py b/sklearn/feature_selection/tests/test_mutual_info.py\n--- a/sklearn/feature_selection/tests/test_mutual_info.py\n+++ b/sklearn/feature_selection/tests/test_mutual_info.py\n@@ -183,18 +183,26 @@ def test_mutual_info_options():\n X_csr = csr_matrix(X)\n \n for mutual_info in (mutual_info_regression, mutual_info_classif):\n- assert_raises(ValueError, mutual_info_regression, X_csr, y,\n+ assert_raises(ValueError, mutual_info, X_csr, y,\n discrete_features=False)\n+ assert_raises(ValueError, mutual_info, X, y,\n+ discrete_features='manual')\n+ assert_raises(ValueError, mutual_info, X_csr, y,\n+ discrete_features=[True, False, True])\n+ assert_raises(IndexError, mutual_info, X, y,\n+ discrete_features=[True, False, True, False])\n+ assert_raises(IndexError, mutual_info, X, y, discrete_features=[1, 4])\n \n mi_1 = mutual_info(X, y, discrete_features='auto', random_state=0)\n mi_2 = mutual_info(X, y, discrete_features=False, random_state=0)\n-\n- mi_3 = mutual_info(X_csr, y, discrete_features='auto',\n- random_state=0)\n- mi_4 = mutual_info(X_csr, y, discrete_features=True,\n+ mi_3 = mutual_info(X_csr, y, discrete_features='auto', random_state=0)\n+ mi_4 = mutual_info(X_csr, y, discrete_features=True, random_state=0)\n+ mi_5 = mutual_info(X, y, discrete_features=[True, False, True],\n random_state=0)\n+ mi_6 = mutual_info(X, y, discrete_features=[0, 2], random_state=0)\n \n assert_array_equal(mi_1, mi_2)\n assert_array_equal(mi_3, mi_4)\n+ assert_array_equal(mi_5, mi_6)\n \n assert not np.allclose(mi_1, mi_3)\n", + "fail_to_pass": "[\"sklearn/feature_selection/tests/test_mutual_info.py::test_mutual_info_options\"]", + "pass_to_pass": "[\"sklearn/feature_selection/tests/test_mutual_info.py::test_compute_mi_dd\", \"sklearn/feature_selection/tests/test_mutual_info.py::test_compute_mi_cc\", \"sklearn/feature_selection/tests/test_mutual_info.py::test_compute_mi_cd\", \"sklearn/feature_selection/tests/test_mutual_info.py::test_compute_mi_cd_unique_label\", \"sklearn/feature_selection/tests/test_mutual_info.py::test_mutual_info_classif_discrete\", \"sklearn/feature_selection/tests/test_mutual_info.py::test_mutual_info_regression\", \"sklearn/feature_selection/tests/test_mutual_info.py::test_mutual_info_classif_mixed\"]", + "expected_spans": { + 
"sklearn/feature_selection/mutual_info_.py": [ + "imports", + "_estimate_mi" + ] + }, + "test_file_spans": { + "sklearn/feature_selection/tests/test_mutual_info.py": [ + "test_mutual_info_options" + ] + }, + "resolved_by": [ + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ], + "sklearn/feature_selection/tests/test_mutual_info.py": [ + "imports" + ] + }, + "alternative_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + }, + "alternative_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + }, + "alternative_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ], + "sklearn/feature_selection/tests/test_mutual_info.py": [] + }, + "alternative_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + }, + "alternative_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + }, + "alternative_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ], + "sklearn/feature_selection/tests/test_mutual_info.py": [ + "test_mutual_info_options" + ] + }, + "alternative_spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "run_name": "20240623_moatless_claude35sonnet", + "spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "run_name": "20240820_honeycomb", + "spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sklearn/feature_selection/mutual_info_.py": [ + "_estimate_mi" + ] + } + } + ] + }, + { + "instance_id": "django__django-11133", + "repo": "django/django", + "base_commit": "879cc3da6249e920b8d54518a0ae06de835d7373", + "problem_statement": "HttpResponse doesn't handle memoryview objects\nDescription\n\t\nI am trying to write a BinaryField retrieved from the database into a HttpResponse. 
When the database is Sqlite this works correctly, but Postgresql returns the contents of the field as a memoryview object and it seems like current Django doesn't like this combination:\nfrom django.http import HttpResponse\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n# String content\nresponse = HttpResponse(\"My Content\")\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\nresponse.content\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n# Out: b'My Content'\n# This is correct\n# Bytes content\nresponse = HttpResponse(b\"My Content\")\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \nresponse.content\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \n# Out: b'My Content'\n# This is also correct\n# memoryview content\nresponse = HttpResponse(memoryview(b\"My Content\"))\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \nresponse.content\n# Out: b''\n# This is not correct, I am expecting b'My Content'\n", + "golden_patch": "diff --git a/django/http/response.py b/django/http/response.py\n--- a/django/http/response.py\n+++ b/django/http/response.py\n@@ -229,7 +229,7 @@ def make_bytes(self, value):\n # Handle string types -- we can't rely on force_bytes here because:\n # - Python attempts str conversion first\n # - when self._charset != 'utf-8' it re-encodes the content\n- if isinstance(value, bytes):\n+ if isinstance(value, (bytes, memoryview)):\n return bytes(value)\n if isinstance(value, str):\n return bytes(value.encode(self.charset))\n", + "test_patch": "diff --git a/tests/httpwrappers/tests.py b/tests/httpwrappers/tests.py\n--- a/tests/httpwrappers/tests.py\n+++ b/tests/httpwrappers/tests.py\n@@ -366,6 +366,10 @@ def test_non_string_content(self):\n r.content = 12345\n self.assertEqual(r.content, b'12345')\n \n+ def test_memoryview_content(self):\n+ r = HttpResponse(memoryview(b'memoryview'))\n+ self.assertEqual(r.content, b'memoryview')\n+\n def test_iter_content(self):\n r = HttpResponse(['abc', 'def', 'ghi'])\n self.assertEqual(r.content, b'abcdefghi')\n", + "fail_to_pass": "[\"test_memoryview_content (httpwrappers.tests.HttpResponseTests)\"]", + "pass_to_pass": "[\"test_streaming_response (httpwrappers.tests.StreamingHttpResponseTests)\", \"test_cookie_edgecases (httpwrappers.tests.CookieTests)\", \"Semicolons and commas are decoded.\", \"Semicolons and commas are encoded.\", \"test_httponly_after_load (httpwrappers.tests.CookieTests)\", \"test_invalid_cookies (httpwrappers.tests.CookieTests)\", \"test_load_dict (httpwrappers.tests.CookieTests)\", \"test_nonstandard_keys (httpwrappers.tests.CookieTests)\", \"test_pickle (httpwrappers.tests.CookieTests)\", \"test_python_cookies (httpwrappers.tests.CookieTests)\", \"test_repeated_nonstandard_keys (httpwrappers.tests.CookieTests)\", \"test_samesite (httpwrappers.tests.CookieTests)\", \"test_response (httpwrappers.tests.FileCloseTests)\", \"test_streaming_response (httpwrappers.tests.FileCloseTests)\", \"test_json_response_custom_encoder (httpwrappers.tests.JsonResponseTests)\", \"test_json_response_list (httpwrappers.tests.JsonResponseTests)\", \"test_json_response_non_ascii (httpwrappers.tests.JsonResponseTests)\", \"test_json_response_passing_arguments_to_json_dumps (httpwrappers.tests.JsonResponseTests)\", \"test_json_response_raises_type_error_with_default_setting (httpwrappers.tests.JsonResponseTests)\", \"test_json_response_text (httpwrappers.tests.JsonResponseTests)\", \"test_json_response_uuid (httpwrappers.tests.JsonResponseTests)\", \"test_invalid_redirect_repr (httpwrappers.tests.HttpResponseSubclassesTests)\", \"test_not_allowed 
(httpwrappers.tests.HttpResponseSubclassesTests)\", \"test_not_allowed_repr (httpwrappers.tests.HttpResponseSubclassesTests)\", \"test_not_allowed_repr_no_content_type (httpwrappers.tests.HttpResponseSubclassesTests)\", \"test_not_modified (httpwrappers.tests.HttpResponseSubclassesTests)\", \"test_not_modified_repr (httpwrappers.tests.HttpResponseSubclassesTests)\", \"test_redirect (httpwrappers.tests.HttpResponseSubclassesTests)\", \"Make sure HttpResponseRedirect works with lazy strings.\", \"test_redirect_repr (httpwrappers.tests.HttpResponseSubclassesTests)\", \"test_dict_behavior (httpwrappers.tests.HttpResponseTests)\", \"test_file_interface (httpwrappers.tests.HttpResponseTests)\", \"test_headers_type (httpwrappers.tests.HttpResponseTests)\", \"test_iter_content (httpwrappers.tests.HttpResponseTests)\", \"test_iterator_isnt_rewound (httpwrappers.tests.HttpResponseTests)\", \"test_lazy_content (httpwrappers.tests.HttpResponseTests)\", \"test_long_line (httpwrappers.tests.HttpResponseTests)\", \"test_newlines_in_headers (httpwrappers.tests.HttpResponseTests)\", \"test_non_string_content (httpwrappers.tests.HttpResponseTests)\", \"test_stream_interface (httpwrappers.tests.HttpResponseTests)\", \"test_unsafe_redirect (httpwrappers.tests.HttpResponseTests)\", \"test_basic_mutable_operations (httpwrappers.tests.QueryDictTests)\", \"test_create_with_no_args (httpwrappers.tests.QueryDictTests)\", \"test_duplicates_in_fromkeys_iterable (httpwrappers.tests.QueryDictTests)\", \"test_fromkeys_empty_iterable (httpwrappers.tests.QueryDictTests)\", \"test_fromkeys_is_immutable_by_default (httpwrappers.tests.QueryDictTests)\", \"test_fromkeys_mutable_override (httpwrappers.tests.QueryDictTests)\", \"test_fromkeys_noniterable (httpwrappers.tests.QueryDictTests)\", \"test_fromkeys_with_nondefault_encoding (httpwrappers.tests.QueryDictTests)\", \"test_fromkeys_with_nonempty_value (httpwrappers.tests.QueryDictTests)\", \"test_immutability (httpwrappers.tests.QueryDictTests)\", \"test_immutable_basic_operations (httpwrappers.tests.QueryDictTests)\", \"test_immutable_get_with_default (httpwrappers.tests.QueryDictTests)\", \"test_missing_key (httpwrappers.tests.QueryDictTests)\", \"Test QueryDict with two key/value pairs with same keys.\", \"A copy of a QueryDict is mutable.\", \"test_mutable_delete (httpwrappers.tests.QueryDictTests)\", \"#13572 - QueryDict with a non-default encoding\", \"test_pickle (httpwrappers.tests.QueryDictTests)\", \"test_querydict_fromkeys (httpwrappers.tests.QueryDictTests)\", \"Test QueryDict with one key/value pair\", \"Regression test for #8278: QueryDict.update(QueryDict)\", \"test_urlencode (httpwrappers.tests.QueryDictTests)\", \"test_urlencode_int (httpwrappers.tests.QueryDictTests)\"]", + "expected_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "test_file_spans": { + "tests/httpwrappers/tests.py": [ + "HttpResponseTests.test_iter_content" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" 
+ ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "BinaryField.get_db_prep_value" + ], + "django/http/response.py": [ + "HttpResponseBase.make_bytes", + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes", + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/http/response.py": [ + "imports", + "HttpResponseBase", + "HttpResponseBase.__init__", + "HttpResponseBase.reason_phrase", + "HttpResponseBase.charset", + "HttpResponseBase.serialize_headers", + "HttpResponseBase:5", + "HttpResponseBase._content_type_for_repr", + "HttpResponseBase._convert_to_charset", + "HttpResponseBase.__setitem__", + "HttpResponseBase.has_header", + "HttpResponseBase.set_cookie", + "HttpResponseBase.delete_cookie", + "HttpResponseBase.make_bytes", + "HttpResponseBase.write", + "HttpResponseBase.flush", + "HttpResponseBase.tell", + "HttpResponseBase.writelines", + "HttpResponse", + "HttpResponse.__init__", + "HttpResponse.__repr__", + "HttpResponse.serialize", + "HttpResponse.content_3", + "StreamingHttpResponse", + "StreamingHttpResponse.__init__", + "StreamingHttpResponse.content", + "StreamingHttpResponse._set_streaming_content", + "FileResponse._set_streaming_content", + "FileResponse.set_headers", + "FileResponse", + "HttpResponseRedirectBase", + "HttpResponseRedirectBase.__init__", + "HttpResponseRedirectBase:4", + "HttpResponseRedirectBase.__repr__", + "HttpResponseNotModified.content", + "HttpResponseNotAllowed", + "HttpResponseNotAllowed.__repr__", + "JsonResponse.__init__" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "imports", + "HttpResponseBase", + "HttpResponseBase.__init__", + "HttpResponseBase.reason_phrase", + "HttpResponseBase.charset", + "HttpResponseBase.serialize_headers", + "HttpResponseBase:5", + 
"HttpResponseBase._content_type_for_repr", + "HttpResponseBase._convert_to_charset", + "HttpResponseBase.__setitem__", + "HttpResponseBase.has_header", + "HttpResponseBase.set_cookie", + "HttpResponseBase.delete_cookie", + "HttpResponseBase.make_bytes", + "HttpResponseBase.write", + "HttpResponseBase.flush", + "HttpResponseBase.tell", + "HttpResponseBase.writelines", + "HttpResponse", + "HttpResponse.__init__", + "HttpResponse.__repr__", + "HttpResponse.serialize", + "HttpResponse.content_3", + "StreamingHttpResponse", + "StreamingHttpResponse.__init__", + "StreamingHttpResponse.content", + "StreamingHttpResponse._set_streaming_content", + "FileResponse._set_streaming_content", + "FileResponse.set_headers", + "FileResponse", + "HttpResponseRedirectBase", + "HttpResponseRedirectBase.__init__", + "HttpResponseRedirectBase:4", + "HttpResponseRedirectBase.__repr__", + "HttpResponseNotModified.content", + "HttpResponseNotAllowed", + "HttpResponseNotAllowed.__repr__", + "JsonResponse.__init__" + ] + } + }, + { + "name": "20240402_rag_gpt4", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.__init__" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.__init__" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes", + "HttpResponse.content_3" + ], + "tests/httpwrappers/tests.py": [ + "StreamingHttpResponseTests" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes", + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" 
+ ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240402_rag_claude3opus", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ], + "tests/responses/tests.py": [] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes", + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponseBase.make_bytes", + "HttpResponse.content_3" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + }, + "alternative_spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240524_opencsg_starship_gpt4", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240630_agentless_gpt4o", + "spans": { + "django/http/response.py": [ + 
"HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240828_autose_mixed", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240615_appmap-navie_gpt4o", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240530_autocoderover-v20240408", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240706_sima_gpt4o", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240402_rag_gpt4", + "spans": { + "django/http/response.py": [ + "HttpResponse.__init__" + ] + } + }, + { + "run_name": "20240808_RepoGraph_gpt4o", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240728_sweagent_gpt4o", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240402_sweagent_gpt4", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + }, + { + "run_name": "20240721_amazon-q-developer-agent-20240719-dev", + "spans": { + "django/http/response.py": [ + "HttpResponse.content_3" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-16503", + "repo": "sympy/sympy", + "base_commit": "a7e6f093c98a3c4783848a19fce646e32b6e0161", + "problem_statement": "Bad centering for Sum pretty print\n```\r\n>>> pprint(Sum(x, (x, 1, oo)) + 3)\r\n \u221e\r\n ___\r\n \u2572\r\n \u2572 x\r\n \u2571 + 3\r\n \u2571\r\n \u203e\u203e\u203e\r\nx = 1\r\n```\r\n\r\nThe `x` and the `+ 3` should be aligned. I'm not sure if the `x` should be lower of if the `+ 3` should be higher. 
\n", + "golden_patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -564,7 +564,7 @@ def adjust(s, wid=None, how='<^>'):\n for i in reversed(range(1, d)):\n lines.append('%s/%s' % (' '*i, ' '*(w - i)))\n lines.append(\"/\" + \"_\"*(w - 1) + ',')\n- return d, h + more, lines, 0\n+ return d, h + more, lines, more\n else:\n w = w + more\n d = d + more\n@@ -619,7 +619,7 @@ def adjust(s, wid=None, how='<^>'):\n if first:\n # change F baseline so it centers on the sign\n prettyF.baseline -= d - (prettyF.height()//2 -\n- prettyF.baseline) - adjustment\n+ prettyF.baseline)\n first = False\n \n # put padding to the right\n@@ -629,7 +629,11 @@ def adjust(s, wid=None, how='<^>'):\n # put the present prettyF to the right\n prettyF = prettyForm(*prettySign.right(prettyF))\n \n- prettyF.baseline = max_upper + sign_height//2\n+ # adjust baseline of ascii mode sigma with an odd height so that it is\n+ # exactly through the center\n+ ascii_adjustment = ascii_mode if not adjustment else 0\n+ prettyF.baseline = max_upper + sign_height//2 + ascii_adjustment\n+\n prettyF.binding = prettyForm.MUL\n return prettyF\n \n", + "test_patch": "diff --git a/sympy/printing/pretty/tests/test_pretty.py b/sympy/printing/pretty/tests/test_pretty.py\n--- a/sympy/printing/pretty/tests/test_pretty.py\n+++ b/sympy/printing/pretty/tests/test_pretty.py\n@@ -4423,14 +4423,14 @@ def test_pretty_sum():\n n \\n\\\n ______ \\n\\\n \u2572 \\n\\\n- \u2572 \u221e \\n\\\n- \u2572 \u2320 \\n\\\n- \u2572 \u23ae n \\n\\\n- \u2572 \u23ae x dx\\n\\\n- \u2571 \u2321 \\n\\\n- \u2571 -\u221e \\n\\\n- \u2571 k \\n\\\n- \u2571 \\n\\\n+ \u2572 \\n\\\n+ \u2572 \u221e \\n\\\n+ \u2572 \u2320 \\n\\\n+ \u2572 \u23ae n \\n\\\n+ \u2571 \u23ae x dx\\n\\\n+ \u2571 \u2321 \\n\\\n+ \u2571 -\u221e \\n\\\n+ \u2571 k \\n\\\n \u2571 \\n\\\n \u203e\u203e\u203e\u203e\u203e\u203e \\n\\\n k = 0 \\\n@@ -4474,14 +4474,14 @@ def test_pretty_sum():\n -\u221e \\n\\\n ______ \\n\\\n \u2572 \\n\\\n- \u2572 \u221e \\n\\\n- \u2572 \u2320 \\n\\\n- \u2572 \u23ae n \\n\\\n- \u2572 \u23ae x dx\\n\\\n- \u2571 \u2321 \\n\\\n- \u2571 -\u221e \\n\\\n- \u2571 k \\n\\\n- \u2571 \\n\\\n+ \u2572 \\n\\\n+ \u2572 \u221e \\n\\\n+ \u2572 \u2320 \\n\\\n+ \u2572 \u23ae n \\n\\\n+ \u2571 \u23ae x dx\\n\\\n+ \u2571 \u2321 \\n\\\n+ \u2571 -\u221e \\n\\\n+ \u2571 k \\n\\\n \u2571 \\n\\\n \u203e\u203e\u203e\u203e\u203e\u203e \\n\\\n k = 0 \\\n@@ -4527,14 +4527,14 @@ def test_pretty_sum():\n -\u221e \\n\\\n ______ \\n\\\n \u2572 \\n\\\n- \u2572 \u221e \\n\\\n- \u2572 \u2320 \\n\\\n- \u2572 \u23ae n \\n\\\n- \u2572 \u23ae x dx\\n\\\n- \u2571 \u2321 \\n\\\n- \u2571 -\u221e \\n\\\n- \u2571 k \\n\\\n- \u2571 \\n\\\n+ \u2572 \\n\\\n+ \u2572 \u221e \\n\\\n+ \u2572 \u2320 \\n\\\n+ \u2572 \u23ae n \\n\\\n+ \u2571 \u23ae x dx\\n\\\n+ \u2571 \u2321 \\n\\\n+ \u2571 -\u221e \\n\\\n+ \u2571 k \\n\\\n \u2571 \\n\\\n \u203e\u203e\u203e\u203e\u203e\u203e \\n\\\n 2 2 1 x \\n\\\n@@ -4572,14 +4572,14 @@ def test_pretty_sum():\n x n \\n\\\n ______ \\n\\\n \u2572 \\n\\\n- \u2572 \u221e \\n\\\n- \u2572 \u2320 \\n\\\n- \u2572 \u23ae n \\n\\\n- \u2572 \u23ae x dx\\n\\\n- \u2571 \u2321 \\n\\\n- \u2571 -\u221e \\n\\\n- \u2571 k \\n\\\n- \u2571 \\n\\\n+ \u2572 \\n\\\n+ \u2572 \u221e \\n\\\n+ \u2572 \u2320 \\n\\\n+ \u2572 \u23ae n \\n\\\n+ \u2571 \u23ae x dx\\n\\\n+ \u2571 \u2321 \\n\\\n+ \u2571 -\u221e \\n\\\n+ \u2571 k \\n\\\n \u2571 \\n\\\n \u203e\u203e\u203e\u203e\u203e\u203e \\n\\\n k = 0 \\\n@@ -4602,8 
+4602,8 @@ def test_pretty_sum():\n \u221e \\n\\\n ___ \\n\\\n \u2572 \\n\\\n- \u2572 x\\n\\\n- \u2571 \\n\\\n+ \u2572 \\n\\\n+ \u2571 x\\n\\\n \u2571 \\n\\\n \u203e\u203e\u203e \\n\\\n x = 0 \\\n@@ -4655,10 +4655,10 @@ def test_pretty_sum():\n \u221e \\n\\\n ____ \\n\\\n \u2572 \\n\\\n- \u2572 x\\n\\\n- \u2572 \u2500\\n\\\n- \u2571 2\\n\\\n- \u2571 \\n\\\n+ \u2572 \\n\\\n+ \u2572 x\\n\\\n+ \u2571 \u2500\\n\\\n+ \u2571 2\\n\\\n \u2571 \\n\\\n \u203e\u203e\u203e\u203e \\n\\\n x = 0 \\\n@@ -4716,12 +4716,12 @@ def test_pretty_sum():\n \u221e \\n\\\n _____ \\n\\\n \u2572 \\n\\\n- \u2572 n\\n\\\n- \u2572 \u239b x\u239e \\n\\\n- \u2572 \u239c \u2500\u239f \\n\\\n- \u2571 \u239c 3 2\u239f \\n\\\n- \u2571 \u239dx \u22c5y \u23a0 \\n\\\n- \u2571 \\n\\\n+ \u2572 \\n\\\n+ \u2572 n\\n\\\n+ \u2572 \u239b x\u239e \\n\\\n+ \u2571 \u239c \u2500\u239f \\n\\\n+ \u2571 \u239c 3 2\u239f \\n\\\n+ \u2571 \u239dx \u22c5y \u23a0 \\n\\\n \u2571 \\n\\\n \u203e\u203e\u203e\u203e\u203e \\n\\\n x = 0 \\\n@@ -4844,14 +4844,14 @@ def test_pretty_sum():\n \u221e n \\n\\\n ______ ______ \\n\\\n \u2572 \u2572 \\n\\\n- \u2572 \u2572 \u239b 1 \u239e \\n\\\n- \u2572 \u2572 \u239c1 + \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u239f \\n\\\n- \u2572 \u2572 \u239c 1 \u239f \\n\\\n- \u2572 \u2572 \u239c 1 + \u2500\u2500\u2500\u2500\u2500\u239f 1 \\n\\\n- \u2571 \u2571 \u239c 1\u239f + \u2500\u2500\u2500\u2500\u2500\\n\\\n- \u2571 \u2571 \u239c 1 + \u2500\u239f 1\\n\\\n- \u2571 \u2571 \u239d k\u23a0 1 + \u2500\\n\\\n- \u2571 \u2571 k\\n\\\n+ \u2572 \u2572 \\n\\\n+ \u2572 \u2572 \u239b 1 \u239e \\n\\\n+ \u2572 \u2572 \u239c1 + \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u239f \\n\\\n+ \u2572 \u2572 \u239c 1 \u239f 1 \\n\\\n+ \u2571 \u2571 \u239c 1 + \u2500\u2500\u2500\u2500\u2500\u239f + \u2500\u2500\u2500\u2500\u2500\\n\\\n+ \u2571 \u2571 \u239c 1\u239f 1\\n\\\n+ \u2571 \u2571 \u239c 1 + \u2500\u239f 1 + \u2500\\n\\\n+ \u2571 \u2571 \u239d k\u23a0 k\\n\\\n \u2571 \u2571 \\n\\\n \u203e\u203e\u203e\u203e\u203e\u203e \u203e\u203e\u203e\u203e\u203e\u203e \\n\\\n 1 k = 111 \\n\\\n", + "fail_to_pass": "[\"test_pretty_sum\"]", + "pass_to_pass": "[\"test_pretty_ascii_str\", \"test_pretty_unicode_str\", \"test_upretty_greek\", \"test_upretty_multiindex\", \"test_upretty_sub_super\", \"test_upretty_subs_missing_in_24\", \"test_missing_in_2X_issue_9047\", \"test_upretty_modifiers\", \"test_pretty_Cycle\", \"test_pretty_basic\", \"test_negative_fractions\", \"test_issue_5524\", \"test_pretty_ordering\", \"test_EulerGamma\", \"test_GoldenRatio\", \"test_pretty_relational\", \"test_Assignment\", \"test_AugmentedAssignment\", \"test_issue_7117\", \"test_pretty_rational\", \"test_pretty_functions\", \"test_pretty_sqrt\", \"test_pretty_sqrt_char_knob\", \"test_pretty_sqrt_longsymbol_no_sqrt_char\", \"test_pretty_KroneckerDelta\", \"test_pretty_product\", \"test_pretty_lambda\", \"test_pretty_order\", \"test_pretty_derivatives\", \"test_pretty_integrals\", \"test_pretty_matrix\", \"test_pretty_ndim_arrays\", \"test_tensor_TensorProduct\", \"test_diffgeom_print_WedgeProduct\", \"test_Adjoint\", \"test_pretty_Trace_issue_9044\", \"test_MatrixExpressions\", \"test_pretty_dotproduct\", \"test_pretty_piecewise\", \"test_pretty_ITE\", \"test_pretty_seq\", \"test_any_object_in_sequence\", \"test_print_builtin_set\", \"test_pretty_sets\", \"test_pretty_SetExpr\", \"test_pretty_ImageSet\", \"test_pretty_ConditionSet\", \"test_pretty_ComplexRegion\", \"test_pretty_Union_issue_10414\", \"test_pretty_Intersection_issue_10414\", 
\"test_ProductSet_paranthesis\", \"test_ProductSet_prod_char_issue_10413\", \"test_pretty_sequences\", \"test_pretty_FourierSeries\", \"test_pretty_FormalPowerSeries\", \"test_pretty_limits\", \"test_pretty_ComplexRootOf\", \"test_pretty_RootSum\", \"test_GroebnerBasis\", \"test_pretty_Boolean\", \"test_pretty_Domain\", \"test_pretty_prec\", \"test_pprint\", \"test_pretty_class\", \"test_pretty_no_wrap_line\", \"test_settings\", \"test_units\", \"test_pretty_Subs\", \"test_gammas\", \"test_beta\", \"test_function_subclass_different_name\", \"test_SingularityFunction\", \"test_deltas\", \"test_hyper\", \"test_meijerg\", \"test_noncommutative\", \"test_pretty_special_functions\", \"test_pretty_geometry\", \"test_expint\", \"test_elliptic_functions\", \"test_RandomDomain\", \"test_PrettyPoly\", \"test_issue_6285\", \"test_issue_6359\", \"test_issue_6739\", \"test_complicated_symbol_unchanged\", \"test_categories\", \"test_PrettyModules\", \"test_QuotientRing\", \"test_Homomorphism\", \"test_Tr\", \"test_pretty_Add\", \"test_issue_7179\", \"test_issue_7180\", \"test_pretty_Complement\", \"test_pretty_SymmetricDifference\", \"test_pretty_Contains\", \"test_issue_4335\", \"test_issue_6324\", \"test_issue_7927\", \"test_issue_6134\", \"test_issue_9877\", \"test_issue_13651\", \"test_pretty_primenu\", \"test_pretty_primeomega\", \"test_pretty_Mod\", \"test_issue_11801\", \"test_pretty_UnevaluatedExpr\", \"test_issue_10472\", \"test_MatrixElement_printing\", \"test_issue_12675\", \"test_MatrixSymbol_printing\", \"test_degree_printing\", \"test_vector_expr_pretty_printing\", \"test_pretty_print_tensor_expr\", \"test_pretty_print_tensor_partial_deriv\", \"test_issue_15560\", \"test_print_lerchphi\", \"test_issue_15583\", \"test_matrixSymbolBold\", \"test_center_accent\"]", + "expected_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_Sum" + ] + }, + "test_file_spans": { + "sympy/printing/pretty/tests/test_pretty.py": [ + "test_pretty_sum" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11179", + "repo": "django/django", + "base_commit": "19fc6376ce67d01ca37a91ef2f55ef769f50513a", + "problem_statement": "delete() on instances of models without any dependencies doesn't clear PKs.\nDescription\n\t\nDeleting any model with no dependencies not updates the PK on the model. It should be set to None after .delete() call.\nSee Django.db.models.deletion:276-281. 
Should update the model line 280.\n", + "golden_patch": "diff --git a/django/db/models/deletion.py b/django/db/models/deletion.py\n--- a/django/db/models/deletion.py\n+++ b/django/db/models/deletion.py\n@@ -277,6 +277,7 @@ def delete(self):\n if self.can_fast_delete(instance):\n with transaction.mark_for_rollback_on_error():\n count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using)\n+ setattr(instance, model._meta.pk.attname, None)\n return count, {model._meta.label: count}\n \n with transaction.atomic(using=self.using, savepoint=False):\n", + "test_patch": "diff --git a/tests/delete/tests.py b/tests/delete/tests.py\n--- a/tests/delete/tests.py\n+++ b/tests/delete/tests.py\n@@ -1,6 +1,7 @@\n from math import ceil\n \n from django.db import IntegrityError, connection, models\n+from django.db.models.deletion import Collector\n from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE\n from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature\n \n@@ -471,6 +472,14 @@ def test_fast_delete_qs(self):\n self.assertEqual(User.objects.count(), 1)\n self.assertTrue(User.objects.filter(pk=u2.pk).exists())\n \n+ def test_fast_delete_instance_set_pk_none(self):\n+ u = User.objects.create()\n+ # User can be fast-deleted.\n+ collector = Collector(using='default')\n+ self.assertTrue(collector.can_fast_delete(u))\n+ u.delete()\n+ self.assertIsNone(u.pk)\n+\n def test_fast_delete_joined_qs(self):\n a = Avatar.objects.create(desc='a')\n User.objects.create(avatar=a)\n", + "fail_to_pass": "[\"test_fast_delete_instance_set_pk_none (delete.tests.FastDeleteTests)\"]", + "pass_to_pass": "[\"test_fast_delete_empty_no_update_can_self_select (delete.tests.FastDeleteTests)\", \"test_fast_delete_fk (delete.tests.FastDeleteTests)\", \"test_fast_delete_inheritance (delete.tests.FastDeleteTests)\", \"test_fast_delete_joined_qs (delete.tests.FastDeleteTests)\", \"test_fast_delete_large_batch (delete.tests.FastDeleteTests)\", \"test_fast_delete_m2m (delete.tests.FastDeleteTests)\", \"test_fast_delete_qs (delete.tests.FastDeleteTests)\", \"test_fast_delete_revm2m (delete.tests.FastDeleteTests)\", \"test_auto (delete.tests.OnDeleteTests)\", \"test_auto_nullable (delete.tests.OnDeleteTests)\", \"test_cascade (delete.tests.OnDeleteTests)\", \"test_cascade_from_child (delete.tests.OnDeleteTests)\", \"test_cascade_from_parent (delete.tests.OnDeleteTests)\", \"test_cascade_nullable (delete.tests.OnDeleteTests)\", \"test_do_nothing (delete.tests.OnDeleteTests)\", \"test_do_nothing_qscount (delete.tests.OnDeleteTests)\", \"test_inheritance_cascade_down (delete.tests.OnDeleteTests)\", \"test_inheritance_cascade_up (delete.tests.OnDeleteTests)\", \"test_o2o_setnull (delete.tests.OnDeleteTests)\", \"test_protect (delete.tests.OnDeleteTests)\", \"test_setdefault (delete.tests.OnDeleteTests)\", \"test_setdefault_none (delete.tests.OnDeleteTests)\", \"test_setnull (delete.tests.OnDeleteTests)\", \"test_setnull_from_child (delete.tests.OnDeleteTests)\", \"test_setnull_from_parent (delete.tests.OnDeleteTests)\", \"test_setvalue (delete.tests.OnDeleteTests)\", \"test_bulk (delete.tests.DeletionTests)\", \"test_can_defer_constraint_checks (delete.tests.DeletionTests)\", \"test_delete_with_keeping_parents (delete.tests.DeletionTests)\", \"test_delete_with_keeping_parents_relationships (delete.tests.DeletionTests)\", \"test_deletion_order (delete.tests.DeletionTests)\", \"test_hidden_related (delete.tests.DeletionTests)\", \"test_instance_update (delete.tests.DeletionTests)\", \"test_large_delete 
(delete.tests.DeletionTests)\", \"test_large_delete_related (delete.tests.DeletionTests)\", \"test_m2m (delete.tests.DeletionTests)\", \"test_model_delete_returns_num_rows (delete.tests.DeletionTests)\", \"test_proxied_model_duplicate_queries (delete.tests.DeletionTests)\", \"test_queryset_delete_returns_num_rows (delete.tests.DeletionTests)\", \"test_relational_post_delete_signals_happen_before_parent_object (delete.tests.DeletionTests)\"]", + "expected_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "test_file_spans": { + "tests/delete/tests.py": [ + "imports", + "FastDeleteTests.test_fast_delete_joined_qs" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/db/models/base.py": [ + "Model.delete" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/db/models/base.py": [ + "Model.delete" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/db/models/base.py": [ + "Model.delete" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/models/deletion.py": [ + "imports", + "CASCADE", + "PROTECT", + "SET", + "get_candidate_relations_to_delete", + "Collector.__init__", + "Collector.add", + "Collector.add_field_update", + "Collector.can_fast_delete", + "Collector.get_del_batches", + "Collector.collect", + "Collector.related_objects", + "Collector.sort", + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "imports", + "CASCADE", + "PROTECT", + "SET", + "get_candidate_relations_to_delete", + "Collector.__init__", + "Collector.add", + "Collector.add_field_update", + "Collector.can_fast_delete", + "Collector.get_del_batches", + "Collector.collect", + "Collector.related_objects", + "Collector.sort", + "Collector.delete" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/db/models/base.py": [ + "Model.delete" + ] + }, + "alternative_spans": {} + }, + { + 
"name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/base.py": [ + "Model.delete" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/base.py": [ + "Model.delete" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/base.py": [ + "Model.delete" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/models/base.py": [ + "Model.delete" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ], + "tests/delete/tests.py": [ + "DeletionTests.test_m2m" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ], + "tests/delete/tests.py": [] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-13584", + "repo": 
"scikit-learn/scikit-learn", + "base_commit": "0e3c1879b06d839171b7d0a607d71bbb19a966a9", + "problem_statement": "bug in print_changed_only in new repr: vector values\n```python\r\nimport sklearn\r\nimport numpy as np\r\nfrom sklearn.linear_model import LogisticRegressionCV\r\nsklearn.set_config(print_changed_only=True)\r\nprint(LogisticRegressionCV(Cs=np.array([0.1, 1])))\r\n```\r\n> ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()\r\n\r\nping @NicolasHug \r\n\n", + "golden_patch": "diff --git a/sklearn/utils/_pprint.py b/sklearn/utils/_pprint.py\n--- a/sklearn/utils/_pprint.py\n+++ b/sklearn/utils/_pprint.py\n@@ -95,7 +95,7 @@ def _changed_params(estimator):\n init_params = signature(init_func).parameters\n init_params = {name: param.default for name, param in init_params.items()}\n for k, v in params.items():\n- if (v != init_params[k] and\n+ if (repr(v) != repr(init_params[k]) and\n not (is_scalar_nan(init_params[k]) and is_scalar_nan(v))):\n filtered_params[k] = v\n return filtered_params\n", + "test_patch": "diff --git a/sklearn/utils/tests/test_pprint.py b/sklearn/utils/tests/test_pprint.py\n--- a/sklearn/utils/tests/test_pprint.py\n+++ b/sklearn/utils/tests/test_pprint.py\n@@ -4,6 +4,7 @@\n import numpy as np\n \n from sklearn.utils._pprint import _EstimatorPrettyPrinter\n+from sklearn.linear_model import LogisticRegressionCV\n from sklearn.pipeline import make_pipeline\n from sklearn.base import BaseEstimator, TransformerMixin\n from sklearn.feature_selection import SelectKBest, chi2\n@@ -212,6 +213,9 @@ def test_changed_only():\n expected = \"\"\"SimpleImputer()\"\"\"\n assert imputer.__repr__() == expected\n \n+ # make sure array parameters don't throw error (see #13583)\n+ repr(LogisticRegressionCV(Cs=np.array([0.1, 1])))\n+\n set_config(print_changed_only=False)\n \n \n", + "fail_to_pass": "[\"sklearn/utils/tests/test_pprint.py::test_changed_only\", \"sklearn/utils/tests/test_pprint.py::test_pipeline\", \"sklearn/utils/tests/test_pprint.py::test_deeply_nested\", \"sklearn/utils/tests/test_pprint.py::test_gridsearch\", \"sklearn/utils/tests/test_pprint.py::test_gridsearch_pipeline\", \"sklearn/utils/tests/test_pprint.py::test_n_max_elements_to_show\"]", + "pass_to_pass": "[\"sklearn/utils/tests/test_pprint.py::test_basic\", \"sklearn/utils/tests/test_pprint.py::test_length_constraint\", \"sklearn/utils/tests/test_pprint.py::test_builtin_prettyprinter\"]", + "expected_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + }, + "test_file_spans": { + "sklearn/utils/tests/test_pprint.py": [ + "imports", + "test_changed_only" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV", + "LogisticRegressionCV.score" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "docstring", + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + 
"docstring", + "_changed_params" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.__init__" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "docstring", + "docstring:27", + "imports", + "KeyValTuple.__repr__", + "_changed_params", + "_EstimatorPrettyPrinter.__init__", + "_EstimatorPrettyPrinter.format", + "_EstimatorPrettyPrinter._pprint_estimator", + "_EstimatorPrettyPrinter._format_dict_items", + "_EstimatorPrettyPrinter._format_params", + "_EstimatorPrettyPrinter._format_params_or_dict_items", + "_EstimatorPrettyPrinter._format_items", + "_EstimatorPrettyPrinter._pprint_key_val_tuple", + "_safe_repr" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "docstring", + "docstring:27", + "imports", + "KeyValTuple.__repr__", + "_changed_params", + "_EstimatorPrettyPrinter.__init__", + "_EstimatorPrettyPrinter.format", + "_EstimatorPrettyPrinter._pprint_estimator", + "_EstimatorPrettyPrinter._format_dict_items", + "_EstimatorPrettyPrinter._format_params", + "_EstimatorPrettyPrinter._format_params_or_dict_items", + "_EstimatorPrettyPrinter._format_items", + "_EstimatorPrettyPrinter._pprint_key_val_tuple", + "_safe_repr" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "docstring", + "_changed_params", + "_EstimatorPrettyPrinter", + "_EstimatorPrettyPrinter._format_params_or_dict_items" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "docstring", + "_changed_params", + "_EstimatorPrettyPrinter", + "_EstimatorPrettyPrinter._format_params_or_dict_items" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "docstring", + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "docstring", + "_changed_params" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "_changed_params" + ] + }, + "alternative_spans": { + 
"sklearn/utils/_pprint.py": [ + "_changed_params" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ], + "sklearn/utils/tests/test_pprint.py": [ + "test_changed_only" + ] + }, + "alternative_spans": { + "sklearn/utils/_pprint.py": [ + "imports", + "_changed_params" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.__init__" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pytest-dev__pytest-5103", + "repo": "pytest-dev/pytest", + "base_commit": "10ca84ffc56c2dd2d9dc4bd71b7b898e083500cd", + "problem_statement": "Unroll the iterable for all/any calls to get better reports\nSometime I need to assert some predicate on all of an iterable, and for that the builtin functions `all`/`any` are great - but the failure messages aren't useful at all!\r\nFor example - the same test written in three ways:\r\n\r\n- A generator expression\r\n```sh \r\n def test_all_even():\r\n even_stevens = list(range(1,100,2))\r\n> assert all(is_even(number) for number in even_stevens)\r\nE assert False\r\nE + where False = all(. at 0x101f82ed0>)\r\n```\r\n- A list comprehension\r\n```sh\r\n def test_all_even():\r\n even_stevens = list(range(1,100,2))\r\n> assert all([is_even(number) for number in even_stevens])\r\nE assert False\r\nE + where False = all([False, False, False, False, False, False, ...])\r\n```\r\n- A for loop\r\n```sh\r\n def test_all_even():\r\n even_stevens = list(range(1,100,2))\r\n for number in even_stevens:\r\n> assert is_even(number)\r\nE assert False\r\nE + where False = is_even(1)\r\n\r\ntest_all_any.py:7: AssertionError\r\n```\r\nThe only one that gives a meaningful report is the for loop - but it's way more wordy, and `all` asserts don't translate to a for loop nicely (I'll have to write a `break` or a helper function - yuck)\r\nI propose the assertion re-writer \"unrolls\" the iterator to the third form, and then uses the already existing reports.\r\n\r\n- [x] Include a detailed description of the bug or suggestion\r\n- [x] `pip list` of the virtual environment you are using\r\n```\r\nPackage Version\r\n-------------- -------\r\natomicwrites 1.3.0 \r\nattrs 19.1.0 \r\nmore-itertools 7.0.0 \r\npip 19.0.3 \r\npluggy 0.9.0 \r\npy 1.8.0 \r\npytest 4.4.0 \r\nsetuptools 40.8.0 \r\nsix 1.12.0 \r\n```\r\n- [x] pytest and operating system versions\r\n`platform darwin -- Python 3.7.3, pytest-4.4.0, py-1.8.0, pluggy-0.9.0`\r\n- [x] Minimal example if possible\r\n\n", + "golden_patch": "diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py\n--- a/src/_pytest/assertion/rewrite.py\n+++ b/src/_pytest/assertion/rewrite.py\n@@ -964,6 +964,8 @@ def visit_Call_35(self, call):\n \"\"\"\n visit `ast.Call` nodes on Python3.5 and after\n \"\"\"\n+ if isinstance(call.func, ast.Name) and call.func.id == \"all\":\n+ return self._visit_all(call)\n new_func, func_expl = 
self.visit(call.func)\n arg_expls = []\n new_args = []\n@@ -987,6 +989,27 @@ def visit_Call_35(self, call):\n outer_expl = \"%s\\n{%s = %s\\n}\" % (res_expl, res_expl, expl)\n return res, outer_expl\n \n+ def _visit_all(self, call):\n+ \"\"\"Special rewrite for the builtin all function, see #5062\"\"\"\n+ if not isinstance(call.args[0], (ast.GeneratorExp, ast.ListComp)):\n+ return\n+ gen_exp = call.args[0]\n+ assertion_module = ast.Module(\n+ body=[ast.Assert(test=gen_exp.elt, lineno=1, msg=\"\", col_offset=1)]\n+ )\n+ AssertionRewriter(module_path=None, config=None).run(assertion_module)\n+ for_loop = ast.For(\n+ iter=gen_exp.generators[0].iter,\n+ target=gen_exp.generators[0].target,\n+ body=assertion_module.body,\n+ orelse=[],\n+ )\n+ self.statements.append(for_loop)\n+ return (\n+ ast.Num(n=1),\n+ \"\",\n+ ) # Return an empty expression, all the asserts are in the for_loop\n+\n def visit_Starred(self, starred):\n # From Python 3.5, a Starred node can appear in a function call\n res, expl = self.visit(starred.value)\n@@ -997,6 +1020,8 @@ def visit_Call_legacy(self, call):\n \"\"\"\n visit `ast.Call nodes on 3.4 and below`\n \"\"\"\n+ if isinstance(call.func, ast.Name) and call.func.id == \"all\":\n+ return self._visit_all(call)\n new_func, func_expl = self.visit(call.func)\n arg_expls = []\n new_args = []\n", + "test_patch": "diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py\n--- a/testing/test_assertrewrite.py\n+++ b/testing/test_assertrewrite.py\n@@ -656,6 +656,12 @@ def __repr__(self):\n else:\n assert lines == [\"assert 0 == 1\\n + where 1 = \\\\n{ \\\\n~ \\\\n}.a\"]\n \n+ def test_unroll_expression(self):\n+ def f():\n+ assert all(x == 1 for x in range(10))\n+\n+ assert \"0 == 1\" in getmsg(f)\n+\n def test_custom_repr_non_ascii(self):\n def f():\n class A(object):\n@@ -671,6 +677,53 @@ def __repr__(self):\n assert \"UnicodeDecodeError\" not in msg\n assert \"UnicodeEncodeError\" not in msg\n \n+ def test_unroll_generator(self, testdir):\n+ testdir.makepyfile(\n+ \"\"\"\n+ def check_even(num):\n+ if num % 2 == 0:\n+ return True\n+ return False\n+\n+ def test_generator():\n+ odd_list = list(range(1,9,2))\n+ assert all(check_even(num) for num in odd_list)\"\"\"\n+ )\n+ result = testdir.runpytest()\n+ result.stdout.fnmatch_lines([\"*assert False*\", \"*where False = check_even(1)*\"])\n+\n+ def test_unroll_list_comprehension(self, testdir):\n+ testdir.makepyfile(\n+ \"\"\"\n+ def check_even(num):\n+ if num % 2 == 0:\n+ return True\n+ return False\n+\n+ def test_list_comprehension():\n+ odd_list = list(range(1,9,2))\n+ assert all([check_even(num) for num in odd_list])\"\"\"\n+ )\n+ result = testdir.runpytest()\n+ result.stdout.fnmatch_lines([\"*assert False*\", \"*where False = check_even(1)*\"])\n+\n+ def test_for_loop(self, testdir):\n+ testdir.makepyfile(\n+ \"\"\"\n+ def check_even(num):\n+ if num % 2 == 0:\n+ return True\n+ return False\n+\n+ def test_for_loop():\n+ odd_list = list(range(1,9,2))\n+ for num in odd_list:\n+ assert check_even(num)\n+ \"\"\"\n+ )\n+ result = testdir.runpytest()\n+ result.stdout.fnmatch_lines([\"*assert False*\", \"*where False = check_even(1)*\"])\n+\n \n class TestRewriteOnImport(object):\n def test_pycache_is_a_file(self, testdir):\n", + "fail_to_pass": "[\"testing/test_assertrewrite.py::TestAssertionRewrite::test_unroll_expression\"]", + "pass_to_pass": "[\"testing/test_assertrewrite.py::TestAssertionRewrite::test_place_initial_imports\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_dont_rewrite\", 
\"testing/test_assertrewrite.py::TestAssertionRewrite::test_name\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_dont_rewrite_if_hasattr_fails\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_assert_already_has_message\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_boolop\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_short_circuit_evaluation\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_unary_op\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_binary_op\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_boolop_percent\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_call\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_attribute\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_comparisons\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_len\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_custom_reprcompare\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_assert_raising_nonzero_in_comparison\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_formatchar\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_custom_repr\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_custom_repr_non_ascii\", \"testing/test_assertrewrite.py::TestAssertionRewriteHookDetails::test_read_pyc\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_dont_rewrite_plugin\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_assertion_message\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_assertion_message_multiline\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_assertion_message_tuple\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_assertion_message_expr\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_assertion_message_escape\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_assertion_messages_bytes\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_at_operator_issue1290\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_starred_with_side_effect\", \"testing/test_assertrewrite.py::TestAssertionRewrite::test_for_loop\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_pycache_is_a_file\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_pycache_is_readonly\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_zipfile\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_readonly\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_dont_write_bytecode\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_orphaned_pyc_file\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_pyc_vs_pyo\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_package\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_translate_newlines\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_package_without__init__py\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_rewrite_warning\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_rewrite_module_imported_from_conftest\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_remember_rewritten_modules\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_rewrite_warning_using_pytest_plugins\", \"testing/test_assertrewrite.py::TestRewriteOnImport::test_rewrite_warning_using_pytest_plugins_env_var\", 
\"testing/test_assertrewrite.py::TestAssertionRewriteHookDetails::test_loader_is_package_false_for_module\", \"testing/test_assertrewrite.py::TestAssertionRewriteHookDetails::test_loader_is_package_true_for_package\", \"testing/test_assertrewrite.py::TestAssertionRewriteHookDetails::test_sys_meta_path_munged\", \"testing/test_assertrewrite.py::TestAssertionRewriteHookDetails::test_write_pyc\", \"testing/test_assertrewrite.py::TestAssertionRewriteHookDetails::test_resources_provider_for_loader\", \"testing/test_assertrewrite.py::TestAssertionRewriteHookDetails::test_reload_is_same\", \"testing/test_assertrewrite.py::TestAssertionRewriteHookDetails::test_reload_reloads\", \"testing/test_assertrewrite.py::TestAssertionRewriteHookDetails::test_get_data_support\", \"testing/test_assertrewrite.py::test_issue731\", \"testing/test_assertrewrite.py::TestIssue925::test_simple_case\", \"testing/test_assertrewrite.py::TestIssue925::test_long_case\", \"testing/test_assertrewrite.py::TestIssue925::test_many_brackets\", \"testing/test_assertrewrite.py::TestIssue2121::test_rewrite_python_files_contain_subdirs\", \"testing/test_assertrewrite.py::test_source_mtime_long_long[-1]\", \"testing/test_assertrewrite.py::test_source_mtime_long_long[1]\", \"testing/test_assertrewrite.py::test_rewrite_infinite_recursion\", \"testing/test_assertrewrite.py::TestEarlyRewriteBailout::test_basic\", \"testing/test_assertrewrite.py::TestEarlyRewriteBailout::test_pattern_contains_subdirectories\", \"testing/test_assertrewrite.py::TestEarlyRewriteBailout::test_cwd_changed\"]", + "expected_spans": { + "src/_pytest/assertion/rewrite.py": [ + "AssertionRewriter.visit_Call_35", + "AssertionRewriter.visit_Call_legacy" + ] + }, + "test_file_spans": { + "testing/test_assertrewrite.py": [ + "TestAssertionRewrite.test_custom_repr_non_ascii", + "TestAssertionRewrite" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11283", + "repo": "django/django", + "base_commit": "08a4ee06510ae45562c228eefbdcaac84bd38c7a", + "problem_statement": "Migration auth.0011_update_proxy_permissions fails for models recreated as a proxy.\nDescription\n\t \n\t\t(last modified by Mariusz Felisiak)\n\t \nI am trying to update my project to Django 2.2. When I launch python manage.py migrate, I get this error message when migration auth.0011_update_proxy_permissions is applying (full stacktrace is available \u200bhere):\ndjango.db.utils.IntegrityError: duplicate key value violates unique constraint \"idx_18141_auth_permission_content_type_id_01ab375a_uniq\" DETAIL: Key (co.ntent_type_id, codename)=(12, add_agency) already exists.\nIt looks like the migration is trying to re-create already existing entries in the auth_permission table. At first I though it cloud because we recently renamed a model. But after digging and deleting the entries associated with the renamed model from our database in the auth_permission table, the problem still occurs with other proxy models.\nI tried to update directly from 2.0.13 and 2.1.8. The issues appeared each time. I also deleted my venv and recreated it without an effect.\nI searched for a ticket about this on the bug tracker but found nothing. 
I also posted this on \u200bdjango-users and was asked to report this here.\n", + "golden_patch": "diff --git a/django/contrib/auth/migrations/0011_update_proxy_permissions.py b/django/contrib/auth/migrations/0011_update_proxy_permissions.py\n--- a/django/contrib/auth/migrations/0011_update_proxy_permissions.py\n+++ b/django/contrib/auth/migrations/0011_update_proxy_permissions.py\n@@ -1,5 +1,18 @@\n-from django.db import migrations\n+import sys\n+\n+from django.core.management.color import color_style\n+from django.db import migrations, transaction\n from django.db.models import Q\n+from django.db.utils import IntegrityError\n+\n+WARNING = \"\"\"\n+ A problem arose migrating proxy model permissions for {old} to {new}.\n+\n+ Permission(s) for {new} already existed.\n+ Codenames Q: {query}\n+\n+ Ensure to audit ALL permissions for {old} and {new}.\n+\"\"\"\n \n \n def update_proxy_model_permissions(apps, schema_editor, reverse=False):\n@@ -7,6 +20,7 @@ def update_proxy_model_permissions(apps, schema_editor, reverse=False):\n Update the content_type of proxy model permissions to use the ContentType\n of the proxy model.\n \"\"\"\n+ style = color_style()\n Permission = apps.get_model('auth', 'Permission')\n ContentType = apps.get_model('contenttypes', 'ContentType')\n for Model in apps.get_models():\n@@ -24,10 +38,16 @@ def update_proxy_model_permissions(apps, schema_editor, reverse=False):\n proxy_content_type = ContentType.objects.get_for_model(Model, for_concrete_model=False)\n old_content_type = proxy_content_type if reverse else concrete_content_type\n new_content_type = concrete_content_type if reverse else proxy_content_type\n- Permission.objects.filter(\n- permissions_query,\n- content_type=old_content_type,\n- ).update(content_type=new_content_type)\n+ try:\n+ with transaction.atomic():\n+ Permission.objects.filter(\n+ permissions_query,\n+ content_type=old_content_type,\n+ ).update(content_type=new_content_type)\n+ except IntegrityError:\n+ old = '{}_{}'.format(old_content_type.app_label, old_content_type.model)\n+ new = '{}_{}'.format(new_content_type.app_label, new_content_type.model)\n+ sys.stdout.write(style.WARNING(WARNING.format(old=old, new=new, query=permissions_query)))\n \n \n def revert_proxy_model_permissions(apps, schema_editor):\n", + "test_patch": "diff --git a/tests/auth_tests/test_migrations.py b/tests/auth_tests/test_migrations.py\n--- a/tests/auth_tests/test_migrations.py\n+++ b/tests/auth_tests/test_migrations.py\n@@ -4,6 +4,7 @@\n from django.contrib.auth.models import Permission, User\n from django.contrib.contenttypes.models import ContentType\n from django.test import TestCase\n+from django.test.utils import captured_stdout\n \n from .models import Proxy, UserProxy\n \n@@ -152,3 +153,27 @@ def test_user_keeps_same_permissions_after_migrating_backward(self):\n user = User._default_manager.get(pk=user.pk)\n for permission in [self.default_permission, self.custom_permission]:\n self.assertTrue(user.has_perm('auth_tests.' 
+ permission.codename))\n+\n+ def test_migrate_with_existing_target_permission(self):\n+ \"\"\"\n+ Permissions may already exist:\n+\n+ - Old workaround was to manually create permissions for proxy models.\n+ - Model may have been concrete and then converted to proxy.\n+\n+ Output a reminder to audit relevant permissions.\n+ \"\"\"\n+ proxy_model_content_type = ContentType.objects.get_for_model(Proxy, for_concrete_model=False)\n+ Permission.objects.create(\n+ content_type=proxy_model_content_type,\n+ codename='add_proxy',\n+ name='Can add proxy',\n+ )\n+ Permission.objects.create(\n+ content_type=proxy_model_content_type,\n+ codename='display_proxys',\n+ name='May display proxys information',\n+ )\n+ with captured_stdout() as stdout:\n+ update_proxy_permissions.update_proxy_model_permissions(apps, None)\n+ self.assertIn('A problem arose migrating proxy model permissions', stdout.getvalue())\n", + "fail_to_pass": "[\"test_migrate_with_existing_target_permission (auth_tests.test_migrations.ProxyModelWithSameAppLabelTests)\"]", + "pass_to_pass": "[\"test_migrate_backwards (auth_tests.test_migrations.ProxyModelWithDifferentAppLabelTests)\", \"test_proxy_model_permissions_contenttype (auth_tests.test_migrations.ProxyModelWithDifferentAppLabelTests)\", \"test_user_has_now_proxy_model_permissions (auth_tests.test_migrations.ProxyModelWithDifferentAppLabelTests)\", \"test_user_keeps_same_permissions_after_migrating_backward (auth_tests.test_migrations.ProxyModelWithDifferentAppLabelTests)\", \"test_migrate_backwards (auth_tests.test_migrations.ProxyModelWithSameAppLabelTests)\", \"test_proxy_model_permissions_contenttype (auth_tests.test_migrations.ProxyModelWithSameAppLabelTests)\", \"test_user_keeps_same_permissions_after_migrating_backward (auth_tests.test_migrations.ProxyModelWithSameAppLabelTests)\", \"test_user_still_has_proxy_model_permissions (auth_tests.test_migrations.ProxyModelWithSameAppLabelTests)\"]", + "expected_spans": { + "django/contrib/auth/migrations/0011_update_proxy_permissions.py": [ + "imports", + "update_proxy_model_permissions" + ] + }, + "test_file_spans": { + "tests/auth_tests/test_migrations.py": [ + "imports" + ] + }, + "resolved_by": [ + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/contrib/auth/migrations/0011_update_proxy_permissions.py": [ + "update_proxy_model_permissions", + "Migration" + ] + }, + "alternative_spans": { + "django/contrib/auth/migrations/0011_update_proxy_permissions.py": [ + "update_proxy_model_permissions", + "Migration" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240806_SuperCoder2.0", + "spans": { + "django/contrib/auth/migrations/0011_update_proxy_permissions.py": [ + "update_proxy_model_permissions", + "Migration" + ] + } + } + ] + }, + { + "instance_id": "scikit-learn__scikit-learn-13779", + "repo": "scikit-learn/scikit-learn", + "base_commit": "b34751b7ed02b2cfcc36037fb729d4360480a299", + "problem_statement": "Voting estimator will fail at fit if weights are passed and an estimator is None\nBecause we don't check for an estimator to be `None` in `sample_weight` support, `fit` is failing`.\r\n\r\n```python\r\n X, y = load_iris(return_X_y=True)\r\n voter = VotingClassifier(\r\n estimators=[('lr', LogisticRegression()),\r\n ('rf', RandomForestClassifier())]\r\n )\r\n voter.fit(X, y, sample_weight=np.ones(y.shape))\r\n voter.set_params(lr=None)\r\n voter.fit(X, y, sample_weight=np.ones(y.shape))\r\n```\r\n\r\n```\r\nAttributeError: 'NoneType' object has no attribute 'fit'\r\n```\n", + "golden_patch": 
"diff --git a/sklearn/ensemble/voting.py b/sklearn/ensemble/voting.py\n--- a/sklearn/ensemble/voting.py\n+++ b/sklearn/ensemble/voting.py\n@@ -78,6 +78,8 @@ def fit(self, X, y, sample_weight=None):\n \n if sample_weight is not None:\n for name, step in self.estimators:\n+ if step is None:\n+ continue\n if not has_fit_parameter(step, 'sample_weight'):\n raise ValueError('Underlying estimator \\'%s\\' does not'\n ' support sample weights.' % name)\n", + "test_patch": "diff --git a/sklearn/ensemble/tests/test_voting.py b/sklearn/ensemble/tests/test_voting.py\n--- a/sklearn/ensemble/tests/test_voting.py\n+++ b/sklearn/ensemble/tests/test_voting.py\n@@ -8,9 +8,11 @@\n from sklearn.utils.testing import assert_equal\n from sklearn.utils.testing import assert_raise_message\n from sklearn.exceptions import NotFittedError\n+from sklearn.linear_model import LinearRegression\n from sklearn.linear_model import LogisticRegression\n from sklearn.naive_bayes import GaussianNB\n from sklearn.ensemble import RandomForestClassifier\n+from sklearn.ensemble import RandomForestRegressor\n from sklearn.ensemble import VotingClassifier, VotingRegressor\n from sklearn.model_selection import GridSearchCV\n from sklearn import datasets\n@@ -507,3 +509,25 @@ def test_transform():\n eclf3.transform(X).swapaxes(0, 1).reshape((4, 6)),\n eclf2.transform(X)\n )\n+\n+\n+@pytest.mark.filterwarnings('ignore: Default solver will be changed') # 0.22\n+@pytest.mark.filterwarnings('ignore: Default multi_class will') # 0.22\n+@pytest.mark.parametrize(\n+ \"X, y, voter\",\n+ [(X, y, VotingClassifier(\n+ [('lr', LogisticRegression()),\n+ ('rf', RandomForestClassifier(n_estimators=5))])),\n+ (X_r, y_r, VotingRegressor(\n+ [('lr', LinearRegression()),\n+ ('rf', RandomForestRegressor(n_estimators=5))]))]\n+)\n+def test_none_estimator_with_weights(X, y, voter):\n+ # check that an estimator can be set to None and passing some weight\n+ # regression test for\n+ # https://github.com/scikit-learn/scikit-learn/issues/13777\n+ voter.fit(X, y, sample_weight=np.ones(y.shape))\n+ voter.set_params(lr=None)\n+ voter.fit(X, y, sample_weight=np.ones(y.shape))\n+ y_pred = voter.predict(X)\n+ assert y_pred.shape == y.shape\n", + "fail_to_pass": "[\"sklearn/ensemble/tests/test_voting.py::test_none_estimator_with_weights[X0-y0-voter0]\", \"sklearn/ensemble/tests/test_voting.py::test_none_estimator_with_weights[X1-y1-voter1]\"]", + "pass_to_pass": "[\"sklearn/ensemble/tests/test_voting.py::test_estimator_init\", \"sklearn/ensemble/tests/test_voting.py::test_predictproba_hardvoting\", \"sklearn/ensemble/tests/test_voting.py::test_notfitted\", \"sklearn/ensemble/tests/test_voting.py::test_majority_label_iris\", \"sklearn/ensemble/tests/test_voting.py::test_tie_situation\", \"sklearn/ensemble/tests/test_voting.py::test_weights_iris\", \"sklearn/ensemble/tests/test_voting.py::test_weights_regressor\", \"sklearn/ensemble/tests/test_voting.py::test_predict_on_toy_problem\", \"sklearn/ensemble/tests/test_voting.py::test_predict_proba_on_toy_problem\", \"sklearn/ensemble/tests/test_voting.py::test_multilabel\", \"sklearn/ensemble/tests/test_voting.py::test_gridsearch\", \"sklearn/ensemble/tests/test_voting.py::test_parallel_fit\", \"sklearn/ensemble/tests/test_voting.py::test_sample_weight\", \"sklearn/ensemble/tests/test_voting.py::test_sample_weight_kwargs\", \"sklearn/ensemble/tests/test_voting.py::test_set_params\", \"sklearn/ensemble/tests/test_voting.py::test_set_estimator_none\", \"sklearn/ensemble/tests/test_voting.py::test_estimator_weights_format\", 
\"sklearn/ensemble/tests/test_voting.py::test_transform\"]", + "expected_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "test_file_spans": { + "sklearn/ensemble/tests/test_voting.py": [ + "imports" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sklearn/ensemble/tests/test_voting.py": [ + "test_sample_weight_kwargs" + ], + "sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting.fit" + ] + } + }, + { + "name": "20231010_rag_claude2", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/base.py": [ + "BaseEstimator.set_params" + ], + "sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting._weights_not_none", + "_BaseVoting.fit" + ], + "sklearn/utils/validation.py": [ + "has_fit_parameter" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting._weights_not_none", + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "docstring", + "imports", + "_BaseVoting._weights_not_none", + "_BaseVoting._predict", + "_BaseVoting.fit", + "VotingClassifier.__init__", + "VotingClassifier.fit", + "VotingClassifier.predict", + "VotingClassifier._predict_proba", + "VotingClassifier.transform", + "VotingRegressor.predict" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "docstring", + "imports", + "_BaseVoting._weights_not_none", + "_BaseVoting._predict", + "_BaseVoting.fit", + "VotingClassifier.__init__", + "VotingClassifier.fit", + "VotingClassifier.predict", + "VotingClassifier._predict_proba", + "VotingClassifier.transform", + "VotingRegressor.predict" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + 
"_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting.fit" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "docstring", + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "docstring", + "_BaseVoting.fit" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "imports", + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "imports", + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/ensemble/tests/test_voting.py": [ + "test_sample_weight_kwargs" + ], + 
"sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_parallel_fit_estimator", + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + }, + "alternative_spans": { + "sklearn/ensemble/voting.py": [ + "_BaseVoting.fit" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pytest-dev__pytest-5221", + "repo": "pytest-dev/pytest", + "base_commit": "4a2fdce62b73944030cff9b3e52862868ca9584d", + "problem_statement": "Display fixture scope with `pytest --fixtures`\nIt would be useful to show fixture scopes with `pytest --fixtures`; currently the only way to learn the scope of a fixture is look at the docs (when that is documented) or at the source code.\n", + "golden_patch": "diff --git a/src/_pytest/python.py b/src/_pytest/python.py\n--- a/src/_pytest/python.py\n+++ b/src/_pytest/python.py\n@@ -1342,17 +1342,19 @@ def _showfixtures_main(config, session):\n currentmodule = module\n if verbose <= 0 and argname[0] == \"_\":\n continue\n+ tw.write(argname, green=True)\n+ if fixturedef.scope != \"function\":\n+ tw.write(\" [%s scope]\" % fixturedef.scope, cyan=True)\n if verbose > 0:\n- funcargspec = \"%s -- %s\" % (argname, bestrel)\n- else:\n- funcargspec = argname\n- tw.line(funcargspec, green=True)\n+ tw.write(\" -- %s\" % bestrel, yellow=True)\n+ tw.write(\"\\n\")\n loc = getlocation(fixturedef.func, curdir)\n doc = fixturedef.func.__doc__ or \"\"\n if doc:\n write_docstring(tw, doc)\n else:\n tw.line(\" %s: no docstring available\" % (loc,), red=True)\n+ tw.line()\n \n \n def write_docstring(tw, doc, indent=\" \"):\n", + "test_patch": "diff --git a/testing/python/fixtures.py b/testing/python/fixtures.py\n--- a/testing/python/fixtures.py\n+++ b/testing/python/fixtures.py\n@@ -3037,11 +3037,25 @@ def test_funcarg_compat(self, testdir):\n \n def test_show_fixtures(self, testdir):\n result = testdir.runpytest(\"--fixtures\")\n- result.stdout.fnmatch_lines([\"*tmpdir*\", \"*temporary directory*\"])\n+ result.stdout.fnmatch_lines(\n+ [\n+ \"tmpdir_factory [[]session scope[]]\",\n+ \"*for the test session*\",\n+ \"tmpdir\",\n+ \"*temporary directory*\",\n+ ]\n+ )\n \n def test_show_fixtures_verbose(self, testdir):\n result = testdir.runpytest(\"--fixtures\", \"-v\")\n- result.stdout.fnmatch_lines([\"*tmpdir*--*tmpdir.py*\", \"*temporary directory*\"])\n+ result.stdout.fnmatch_lines(\n+ [\n+ \"tmpdir_factory [[]session scope[]] -- *tmpdir.py*\",\n+ \"*for the test session*\",\n+ \"tmpdir -- *tmpdir.py*\",\n+ \"*temporary directory*\",\n+ ]\n+ )\n \n def test_show_fixtures_testmodule(self, testdir):\n p = testdir.makepyfile(\n", + "fail_to_pass": "[\"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_verbose\"]", + "pass_to_pass": "[\"testing/python/fixtures.py::test_getfuncargnames\", \"testing/python/fixtures.py::TestFillFixtures::test_fillfuncargs_exposed\", \"testing/python/fixtures.py::TestShowFixtures::test_fixture_disallow_twice\", \"testing/python/fixtures.py::test_call_fixture_function_error\", 
\"testing/python/fixtures.py::TestFillFixtures::test_funcarg_lookupfails\", \"testing/python/fixtures.py::TestFillFixtures::test_detect_recursive_dependency_error\", \"testing/python/fixtures.py::TestFillFixtures::test_funcarg_basic\", \"testing/python/fixtures.py::TestFillFixtures::test_funcarg_lookup_modulelevel\", \"testing/python/fixtures.py::TestFillFixtures::test_funcarg_lookup_classlevel\", \"testing/python/fixtures.py::TestFillFixtures::test_conftest_funcargs_only_available_in_subdir\", \"testing/python/fixtures.py::TestFillFixtures::test_extend_fixture_module_class\", \"testing/python/fixtures.py::TestFillFixtures::test_extend_fixture_conftest_module\", \"testing/python/fixtures.py::TestFillFixtures::test_extend_fixture_conftest_conftest\", \"testing/python/fixtures.py::TestFillFixtures::test_extend_fixture_conftest_plugin\", \"testing/python/fixtures.py::TestFillFixtures::test_extend_fixture_plugin_plugin\", \"testing/python/fixtures.py::TestFillFixtures::test_override_parametrized_fixture_conftest_module\", \"testing/python/fixtures.py::TestFillFixtures::test_override_parametrized_fixture_conftest_conftest\", \"testing/python/fixtures.py::TestFillFixtures::test_override_non_parametrized_fixture_conftest_module\", \"testing/python/fixtures.py::TestFillFixtures::test_override_non_parametrized_fixture_conftest_conftest\", \"testing/python/fixtures.py::TestFillFixtures::test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest\", \"testing/python/fixtures.py::TestFillFixtures::test_autouse_fixture_plugin\", \"testing/python/fixtures.py::TestFillFixtures::test_funcarg_lookup_error\", \"testing/python/fixtures.py::TestFillFixtures::test_fixture_excinfo_leak\", \"testing/python/fixtures.py::TestRequestBasic::test_request_attributes\", \"testing/python/fixtures.py::TestRequestBasic::test_request_attributes_method\", \"testing/python/fixtures.py::TestRequestBasic::test_request_contains_funcarg_arg2fixturedefs\", \"testing/python/fixtures.py::TestRequestBasic::test_request_garbage\", \"testing/python/fixtures.py::TestRequestBasic::test_getfixturevalue_recursive\", \"testing/python/fixtures.py::TestRequestBasic::test_getfixturevalue_teardown\", \"testing/python/fixtures.py::TestRequestBasic::test_getfixturevalue[getfixturevalue]\", \"testing/python/fixtures.py::TestRequestBasic::test_getfixturevalue[getfuncargvalue]\", \"testing/python/fixtures.py::TestRequestBasic::test_request_addfinalizer\", \"testing/python/fixtures.py::TestRequestBasic::test_request_addfinalizer_failing_setup\", \"testing/python/fixtures.py::TestRequestBasic::test_request_addfinalizer_failing_setup_module\", \"testing/python/fixtures.py::TestRequestBasic::test_request_addfinalizer_partial_setup_failure\", \"testing/python/fixtures.py::TestRequestBasic::test_request_subrequest_addfinalizer_exceptions\", \"testing/python/fixtures.py::TestRequestBasic::test_request_getmodulepath\", \"testing/python/fixtures.py::TestRequestBasic::test_request_fixturenames\", \"testing/python/fixtures.py::TestRequestBasic::test_request_fixturenames_dynamic_fixture\", \"testing/python/fixtures.py::TestRequestBasic::test_funcargnames_compatattr\", \"testing/python/fixtures.py::TestRequestBasic::test_setupdecorator_and_xunit\", \"testing/python/fixtures.py::TestRequestBasic::test_fixtures_sub_subdir_normalize_sep\", \"testing/python/fixtures.py::TestRequestBasic::test_show_fixtures_color_yes\", \"testing/python/fixtures.py::TestRequestBasic::test_newstyle_with_request\", 
\"testing/python/fixtures.py::TestRequestBasic::test_setupcontext_no_param\", \"testing/python/fixtures.py::TestRequestMarking::test_applymarker\", \"testing/python/fixtures.py::TestRequestMarking::test_accesskeywords\", \"testing/python/fixtures.py::TestRequestMarking::test_accessmarker_dynamic\", \"testing/python/fixtures.py::TestFixtureUsages::test_noargfixturedec\", \"testing/python/fixtures.py::TestFixtureUsages::test_receives_funcargs\", \"testing/python/fixtures.py::TestFixtureUsages::test_receives_funcargs_scope_mismatch\", \"testing/python/fixtures.py::TestFixtureUsages::test_receives_funcargs_scope_mismatch_issue660\", \"testing/python/fixtures.py::TestFixtureUsages::test_invalid_scope\", \"testing/python/fixtures.py::TestFixtureUsages::test_funcarg_parametrized_and_used_twice\", \"testing/python/fixtures.py::TestFixtureUsages::test_factory_uses_unknown_funcarg_as_dependency_error\", \"testing/python/fixtures.py::TestFixtureUsages::test_factory_setup_as_classes_fails\", \"testing/python/fixtures.py::TestFixtureUsages::test_request_can_be_overridden\", \"testing/python/fixtures.py::TestFixtureUsages::test_usefixtures_marker\", \"testing/python/fixtures.py::TestFixtureUsages::test_usefixtures_ini\", \"testing/python/fixtures.py::TestFixtureUsages::test_usefixtures_seen_in_showmarkers\", \"testing/python/fixtures.py::TestFixtureUsages::test_request_instance_issue203\", \"testing/python/fixtures.py::TestFixtureUsages::test_fixture_parametrized_with_iterator\", \"testing/python/fixtures.py::TestFixtureUsages::test_setup_functions_as_fixtures\", \"testing/python/fixtures.py::TestFixtureManagerParseFactories::test_parsefactories_evil_objects_issue214\", \"testing/python/fixtures.py::TestFixtureManagerParseFactories::test_parsefactories_conftest\", \"testing/python/fixtures.py::TestFixtureManagerParseFactories::test_parsefactories_conftest_and_module_and_class\", \"testing/python/fixtures.py::TestFixtureManagerParseFactories::test_parsefactories_relative_node_ids\", \"testing/python/fixtures.py::TestFixtureManagerParseFactories::test_package_xunit_fixture\", \"testing/python/fixtures.py::TestFixtureManagerParseFactories::test_package_fixture_complex\", \"testing/python/fixtures.py::TestFixtureManagerParseFactories::test_collect_custom_items\", \"testing/python/fixtures.py::TestAutouseDiscovery::test_parsefactories_conftest\", \"testing/python/fixtures.py::TestAutouseDiscovery::test_two_classes_separated_autouse\", \"testing/python/fixtures.py::TestAutouseDiscovery::test_setup_at_classlevel\", \"testing/python/fixtures.py::TestAutouseDiscovery::test_callables_nocode\", \"testing/python/fixtures.py::TestAutouseDiscovery::test_autouse_in_conftests\", \"testing/python/fixtures.py::TestAutouseDiscovery::test_autouse_in_module_and_two_classes\", \"testing/python/fixtures.py::TestAutouseManagement::test_autouse_conftest_mid_directory\", \"testing/python/fixtures.py::TestAutouseManagement::test_funcarg_and_setup\", \"testing/python/fixtures.py::TestAutouseManagement::test_uses_parametrized_resource\", \"testing/python/fixtures.py::TestAutouseManagement::test_session_parametrized_function\", \"testing/python/fixtures.py::TestAutouseManagement::test_class_function_parametrization_finalization\", \"testing/python/fixtures.py::TestAutouseManagement::test_scope_ordering\", \"testing/python/fixtures.py::TestAutouseManagement::test_parametrization_setup_teardown_ordering\", \"testing/python/fixtures.py::TestAutouseManagement::test_ordering_autouse_before_explicit\", 
\"testing/python/fixtures.py::TestAutouseManagement::test_ordering_dependencies_torndown_first[p10-p00]\", \"testing/python/fixtures.py::TestAutouseManagement::test_ordering_dependencies_torndown_first[p10-p01]\", \"testing/python/fixtures.py::TestAutouseManagement::test_ordering_dependencies_torndown_first[p11-p00]\", \"testing/python/fixtures.py::TestAutouseManagement::test_ordering_dependencies_torndown_first[p11-p01]\", \"testing/python/fixtures.py::TestFixtureMarker::test_parametrize\", \"testing/python/fixtures.py::TestFixtureMarker::test_multiple_parametrization_issue_736\", \"testing/python/fixtures.py::TestFixtureMarker::test_override_parametrized_fixture_issue_979['fixt,\", \"testing/python/fixtures.py::TestFixtureMarker::test_override_parametrized_fixture_issue_979['fixt,val']\", \"testing/python/fixtures.py::TestFixtureMarker::test_override_parametrized_fixture_issue_979[['fixt',\", \"testing/python/fixtures.py::TestFixtureMarker::test_override_parametrized_fixture_issue_979[('fixt',\", \"testing/python/fixtures.py::TestFixtureMarker::test_scope_session\", \"testing/python/fixtures.py::TestFixtureMarker::test_scope_session_exc\", \"testing/python/fixtures.py::TestFixtureMarker::test_scope_session_exc_two_fix\", \"testing/python/fixtures.py::TestFixtureMarker::test_scope_exc\", \"testing/python/fixtures.py::TestFixtureMarker::test_scope_module_uses_session\", \"testing/python/fixtures.py::TestFixtureMarker::test_scope_module_and_finalizer\", \"testing/python/fixtures.py::TestFixtureMarker::test_scope_mismatch_various\", \"testing/python/fixtures.py::TestFixtureMarker::test_register_only_with_mark\", \"testing/python/fixtures.py::TestFixtureMarker::test_parametrize_and_scope\", \"testing/python/fixtures.py::TestFixtureMarker::test_scope_mismatch\", \"testing/python/fixtures.py::TestFixtureMarker::test_parametrize_separated_order\", \"testing/python/fixtures.py::TestFixtureMarker::test_module_parametrized_ordering\", \"testing/python/fixtures.py::TestFixtureMarker::test_dynamic_parametrized_ordering\", \"testing/python/fixtures.py::TestFixtureMarker::test_class_ordering\", \"testing/python/fixtures.py::TestFixtureMarker::test_parametrize_separated_order_higher_scope_first\", \"testing/python/fixtures.py::TestFixtureMarker::test_parametrized_fixture_teardown_order\", \"testing/python/fixtures.py::TestFixtureMarker::test_fixture_finalizer\", \"testing/python/fixtures.py::TestFixtureMarker::test_class_scope_with_normal_tests\", \"testing/python/fixtures.py::TestFixtureMarker::test_request_is_clean\", \"testing/python/fixtures.py::TestFixtureMarker::test_parametrize_separated_lifecycle\", \"testing/python/fixtures.py::TestFixtureMarker::test_parametrize_function_scoped_finalizers_called\", \"testing/python/fixtures.py::TestFixtureMarker::test_finalizer_order_on_parametrization[session]\", \"testing/python/fixtures.py::TestFixtureMarker::test_finalizer_order_on_parametrization[function]\", \"testing/python/fixtures.py::TestFixtureMarker::test_finalizer_order_on_parametrization[module]\", \"testing/python/fixtures.py::TestFixtureMarker::test_class_scope_parametrization_ordering\", \"testing/python/fixtures.py::TestFixtureMarker::test_parametrize_setup_function\", \"testing/python/fixtures.py::TestFixtureMarker::test_fixture_marked_function_not_collected_as_test\", \"testing/python/fixtures.py::TestFixtureMarker::test_params_and_ids\", \"testing/python/fixtures.py::TestFixtureMarker::test_params_and_ids_yieldfixture\", 
\"testing/python/fixtures.py::TestFixtureMarker::test_deterministic_fixture_collection\", \"testing/python/fixtures.py::TestRequestScopeAccess::test_setup[session--fspath\", \"testing/python/fixtures.py::TestRequestScopeAccess::test_setup[module-module\", \"testing/python/fixtures.py::TestRequestScopeAccess::test_setup[class-module\", \"testing/python/fixtures.py::TestRequestScopeAccess::test_setup[function-module\", \"testing/python/fixtures.py::TestRequestScopeAccess::test_funcarg[session--fspath\", \"testing/python/fixtures.py::TestRequestScopeAccess::test_funcarg[module-module\", \"testing/python/fixtures.py::TestRequestScopeAccess::test_funcarg[class-module\", \"testing/python/fixtures.py::TestRequestScopeAccess::test_funcarg[function-module\", \"testing/python/fixtures.py::TestErrors::test_subfactory_missing_funcarg\", \"testing/python/fixtures.py::TestErrors::test_issue498_fixture_finalizer_failing\", \"testing/python/fixtures.py::TestErrors::test_setupfunc_missing_funcarg\", \"testing/python/fixtures.py::TestShowFixtures::test_funcarg_compat\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_testmodule\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_conftest[True]\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_conftest[False]\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_trimmed_doc\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_indented_doc\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_indented_doc_first_line_unindented\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_indented_in_class\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_different_files\", \"testing/python/fixtures.py::TestShowFixtures::test_show_fixtures_with_same_name\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_simple[fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_simple[yield_fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_scoped[fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_scoped[yield_fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_setup_exception[fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_setup_exception[yield_fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_teardown_exception[fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_teardown_exception[yield_fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_yields_more_than_one[fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_yields_more_than_one[yield_fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_custom_name[fixture]\", \"testing/python/fixtures.py::TestContextManagerFixtureFuncs::test_custom_name[yield_fixture]\", \"testing/python/fixtures.py::TestParameterizedSubRequest::test_call_from_fixture\", \"testing/python/fixtures.py::TestParameterizedSubRequest::test_call_from_test\", \"testing/python/fixtures.py::TestParameterizedSubRequest::test_external_fixture\", \"testing/python/fixtures.py::TestParameterizedSubRequest::test_non_relative_path\", \"testing/python/fixtures.py::test_pytest_fixture_setup_and_post_finalizer_hook\", \"testing/python/fixtures.py::TestScopeOrdering::test_func_closure_module_auto[mark]\", 
\"testing/python/fixtures.py::TestScopeOrdering::test_func_closure_module_auto[autouse]\", \"testing/python/fixtures.py::TestScopeOrdering::test_func_closure_with_native_fixtures\", \"testing/python/fixtures.py::TestScopeOrdering::test_func_closure_module\", \"testing/python/fixtures.py::TestScopeOrdering::test_func_closure_scopes_reordered\", \"testing/python/fixtures.py::TestScopeOrdering::test_func_closure_same_scope_closer_root_first\", \"testing/python/fixtures.py::TestScopeOrdering::test_func_closure_all_scopes_complex\", \"testing/python/fixtures.py::TestScopeOrdering::test_multiple_packages\"]", + "expected_spans": { + "src/_pytest/python.py": [ + "_showfixtures_main" + ] + }, + "test_file_spans": { + "testing/python/fixtures.py": [ + "TestShowFixtures.test_show_fixtures", + "TestShowFixtures.test_show_fixtures_verbose" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "pytest-dev__pytest-5227", + "repo": "pytest-dev/pytest", + "base_commit": "2051e30b9b596e944524ccb787ed20f9f5be93e3", + "problem_statement": "Improve default logging format\nCurrently it is:\r\n\r\n> DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\r\n\r\nI think `name` (module name) would be very useful here, instead of just the base filename.\r\n\r\n(It might also be good to have the relative path there (maybe at the end), but it is usually still very long (but e.g. `$VIRTUAL_ENV` could be substituted therein))\r\n\r\nCurrently it would look like this:\r\n```\r\nutils.py 114 DEBUG (0.000) SELECT \"app_url\".\"id\", \"app_url\".\"created\", \"app_url\".\"url\" FROM \"app_url\" WHERE \"app_url\".\"id\" = 2; args=(2,)\r\nmultipart.py 604 DEBUG Calling on_field_start with no data\r\n```\r\n\r\n\r\nUsing `DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"` instead:\r\n\r\n```\r\nDEBUG django.db.backends:utils.py:114 (0.000) SELECT \"app_url\".\"id\", \"app_url\".\"created\", \"app_url\".\"url\" FROM \"app_url\" WHERE \"app_url\".\"id\" = 2; args=(2,)\r\nDEBUG multipart.multipart:multipart.py:604 Calling on_field_start with no data\r\n```\n", + "golden_patch": "diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py\n--- a/src/_pytest/logging.py\n+++ b/src/_pytest/logging.py\n@@ -15,7 +15,7 @@\n from _pytest.config import create_terminal_writer\n from _pytest.pathlib import Path\n \n-DEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n+DEFAULT_LOG_FORMAT = \"%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s\"\n DEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"\n \n \n", + "test_patch": "diff --git a/testing/logging/test_reporting.py b/testing/logging/test_reporting.py\n--- a/testing/logging/test_reporting.py\n+++ b/testing/logging/test_reporting.py\n@@ -248,7 +248,7 @@ def test_log_cli():\n [\n \"test_log_cli_enabled_disabled.py::test_log_cli \",\n \"*-- live log call --*\",\n- \"test_log_cli_enabled_disabled.py* CRITICAL critical message logged by test\",\n+ \"CRITICAL *test_log_cli_enabled_disabled.py* critical message logged by test\",\n \"PASSED*\",\n ]\n )\n@@ -282,7 +282,7 @@ def test_log_cli(request):\n result.stdout.fnmatch_lines(\n [\n \"test_log_cli_default_level.py::test_log_cli \",\n- \"test_log_cli_default_level.py*WARNING message will be shown*\",\n+ \"WARNING*test_log_cli_default_level.py* message will be shown*\",\n ]\n )\n assert \"INFO message won't be shown\" not in result.stdout.str()\n@@ -523,7 +523,7 @@ def test_log_1(fix):\n )\n assert (\n re.search(\n- r\"(.+)live 
log teardown(.+)\\n(.+)WARNING(.+)\\n(.+)WARNING(.+)\",\n+ r\"(.+)live log teardown(.+)\\nWARNING(.+)\\nWARNING(.+)\",\n result.stdout.str(),\n re.MULTILINE,\n )\n@@ -531,7 +531,7 @@ def test_log_1(fix):\n )\n assert (\n re.search(\n- r\"(.+)live log finish(.+)\\n(.+)WARNING(.+)\\n(.+)WARNING(.+)\",\n+ r\"(.+)live log finish(.+)\\nWARNING(.+)\\nWARNING(.+)\",\n result.stdout.str(),\n re.MULTILINE,\n )\n@@ -565,7 +565,7 @@ def test_log_cli(request):\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines(\n [\n- \"test_log_cli_level.py*This log message will be shown\",\n+ \"*test_log_cli_level.py*This log message will be shown\",\n \"PASSED\", # 'PASSED' on its own line because the log message prints a new line\n ]\n )\n@@ -579,7 +579,7 @@ def test_log_cli(request):\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines(\n [\n- \"test_log_cli_level.py* This log message will be shown\",\n+ \"*test_log_cli_level.py* This log message will be shown\",\n \"PASSED\", # 'PASSED' on its own line because the log message prints a new line\n ]\n )\n@@ -615,7 +615,7 @@ def test_log_cli(request):\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines(\n [\n- \"test_log_cli_ini_level.py* This log message will be shown\",\n+ \"*test_log_cli_ini_level.py* This log message will be shown\",\n \"PASSED\", # 'PASSED' on its own line because the log message prints a new line\n ]\n )\n", + "fail_to_pass": "[\"testing/logging/test_reporting.py::test_log_cli_enabled_disabled[True]\", \"testing/logging/test_reporting.py::test_log_cli_default_level\", \"testing/logging/test_reporting.py::test_sections_single_new_line_after_test_outcome\"]", + "pass_to_pass": "[\"[100%]\", \"[\", \"[100%]------------------------------\", \"testing/logging/test_reporting.py::test_live_logging_suspends_capture[True]\", \"testing/logging/test_reporting.py::test_live_logging_suspends_capture[False]\", \"testing/logging/test_reporting.py::test_nothing_logged\", \"testing/logging/test_reporting.py::test_messages_logged\", \"testing/logging/test_reporting.py::test_root_logger_affected\", \"testing/logging/test_reporting.py::test_log_cli_level_log_level_interaction\", \"testing/logging/test_reporting.py::test_setup_logging\", \"testing/logging/test_reporting.py::test_teardown_logging\", \"testing/logging/test_reporting.py::test_disable_log_capturing\", \"testing/logging/test_reporting.py::test_disable_log_capturing_ini\", \"testing/logging/test_reporting.py::test_log_cli_enabled_disabled[False]\", \"testing/logging/test_reporting.py::test_log_cli_default_level_multiple_tests\", \"testing/logging/test_reporting.py::test_log_cli_default_level_sections\", \"testing/logging/test_reporting.py::test_live_logs_unknown_sections\", \"testing/logging/test_reporting.py::test_log_cli_level\", \"testing/logging/test_reporting.py::test_log_cli_ini_level\", \"testing/logging/test_reporting.py::test_log_cli_auto_enable[]\", \"testing/logging/test_reporting.py::test_log_cli_auto_enable[--log-level=WARNING]\", \"testing/logging/test_reporting.py::test_log_cli_auto_enable[--log-file-level=WARNING]\", \"testing/logging/test_reporting.py::test_log_cli_auto_enable[--log-cli-level=WARNING]\", \"testing/logging/test_reporting.py::test_log_file_cli\", \"testing/logging/test_reporting.py::test_log_file_cli_level\", \"testing/logging/test_reporting.py::test_log_level_not_changed_by_default\", \"testing/logging/test_reporting.py::test_log_file_ini\", 
\"testing/logging/test_reporting.py::test_log_file_ini_level\", \"testing/logging/test_reporting.py::test_log_file_unicode\", \"testing/logging/test_reporting.py::test_collection_live_logging\", \"testing/logging/test_reporting.py::test_collection_logging_to_file\", \"testing/logging/test_reporting.py::test_log_in_hooks\", \"testing/logging/test_reporting.py::test_log_in_runtest_logreport\", \"testing/logging/test_reporting.py::test_log_set_path\"]", + "expected_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "test_file_spans": { + "testing/logging/test_reporting.py": [ + "test_log_cli_enabled_disabled", + "test_log_cli_default_level", + "test_sections_single_new_line_after_test_outcome", + "test_log_cli_level", + "test_log_cli_ini_level" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20231010_rag_claude2", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ], + "testing/logging/test_reporting.py": [ + "test_log_cli_enabled_disabled", + "test_log_cli_default_level" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports", + "ColoredLevelFormatter", + "ColoredLevelFormatter.__init__", + "ColoredLevelFormatter.format", + "get_option_ini", + "pytest_addoption", + "catching_logs", + "LogCaptureFixture.__init__", + "LogCaptureFixture._finalize", + "LogCaptureFixture.set_level", + "get_actual_log_level", + "pytest_configure", + "LoggingPlugin.__init__", + "LoggingPlugin._setup_cli_logging", + "LoggingPlugin.set_log_path", + "LoggingPlugin._log_cli_enabled", + "LoggingPlugin.pytest_collection", + "LoggingPlugin._runtest_for", + "LoggingPlugin._runtest_for_main", + "LoggingPlugin.pytest_runtest_setup", + "LoggingPlugin.pytest_runtest_call", + "LoggingPlugin.pytest_runtest_teardown", + 
"LoggingPlugin.pytest_runtest_logstart", + "LoggingPlugin.pytest_runtest_logfinish", + "LoggingPlugin.pytest_runtest_logreport", + "LoggingPlugin.pytest_sessionfinish", + "LoggingPlugin.pytest_sessionstart", + "LoggingPlugin", + "LoggingPlugin.pytest_runtestloop", + "_LiveLoggingStreamHandler.set_when", + "_LiveLoggingStreamHandler.emit" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports", + "ColoredLevelFormatter", + "ColoredLevelFormatter.__init__", + "ColoredLevelFormatter.format", + "get_option_ini", + "pytest_addoption", + "catching_logs", + "LogCaptureFixture.__init__", + "LogCaptureFixture._finalize", + "LogCaptureFixture.set_level", + "get_actual_log_level", + "pytest_configure", + "LoggingPlugin.__init__", + "LoggingPlugin._setup_cli_logging", + "LoggingPlugin.set_log_path", + "LoggingPlugin._log_cli_enabled", + "LoggingPlugin.pytest_collection", + "LoggingPlugin._runtest_for", + "LoggingPlugin._runtest_for_main", + "LoggingPlugin.pytest_runtest_setup", + "LoggingPlugin.pytest_runtest_call", + "LoggingPlugin.pytest_runtest_teardown", + "LoggingPlugin.pytest_runtest_logstart", + "LoggingPlugin.pytest_runtest_logfinish", + "LoggingPlugin.pytest_runtest_logreport", + "LoggingPlugin.pytest_sessionfinish", + "LoggingPlugin.pytest_sessionstart", + "LoggingPlugin", + "LoggingPlugin.pytest_runtestloop", + "_LiveLoggingStreamHandler.set_when", + "_LiveLoggingStreamHandler.emit" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" 
+ ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ], + "testing/logging/test_formatter.py": [ + "test_coloredlogformatter" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ], + "testing/logging/test_formatter.py": [ + "test_coloredlogformatter" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240402_rag_claude3opus", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ], + "src/_pytest/nodes.py": [ + "get_fslocation_from_item" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ], + "testing/logging/test_formatter.py": [ + "test_coloredlogformatter" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/logging.py": [ + "imports" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-16792", + "repo": "sympy/sympy", + "base_commit": "09786a173e7a0a488f46dd6000177c23e5d24eed", + "problem_statement": "autowrap with cython backend fails when array arguments do not appear in wrapped expr\nWhen using the cython backend for autowrap, it appears that the code is not correctly generated when the function in question has array arguments that do not appear in the final expression. 
A minimal counterexample is:\r\n\r\n```python\r\nfrom sympy.utilities.autowrap import autowrap\r\nfrom sympy import MatrixSymbol\r\nimport numpy as np\r\n\r\nx = MatrixSymbol('x', 2, 1)\r\nexpr = 1.0\r\nf = autowrap(expr, args=(x,), backend='cython')\r\n\r\nf(np.array([[1.0, 2.0]]))\r\n```\r\n\r\nThis should of course return `1.0` but instead fails with:\r\n```python\r\nTypeError: only size-1 arrays can be converted to Python scalars\r\n```\r\n\r\nA little inspection reveals that this is because the corresponding C function is generated with an incorrect signature:\r\n\r\n```C\r\ndouble autofunc(double x) {\r\n\r\n double autofunc_result;\r\n autofunc_result = 1.0;\r\n return autofunc_result;\r\n\r\n}\r\n```\r\n\r\n(`x` should be `double *`, not `double` in this case)\r\n\r\nI've found that this error won't occur so long as `expr` depends at least in part on each argument. For example this slight modification of the above counterexample works perfectly:\r\n\r\n```python\r\nfrom sympy.utilities.autowrap import autowrap\r\nfrom sympy import MatrixSymbol\r\nimport numpy as np\r\n\r\nx = MatrixSymbol('x', 2, 1)\r\n# now output depends on x\r\nexpr = x[0,0]\r\nf = autowrap(expr, args=(x,), backend='cython')\r\n\r\n# returns 1.0 as expected, without failure\r\nf(np.array([[1.0, 2.0]]))\r\n```\r\n\r\nThis may seem like a silly issue (\"why even have `x` as an argument if it doesn't appear in the expression you're trying to evaluate?\"). But of course in interfacing with external libraries (e.g. for numerical integration), one often needs functions to have a pre-defined signature regardless of whether a given argument contributes to the output.\r\n\r\nI think I've identified the problem in `codegen` and will suggest a PR shortly.\n", + "golden_patch": "diff --git a/sympy/utilities/codegen.py b/sympy/utilities/codegen.py\n--- a/sympy/utilities/codegen.py\n+++ b/sympy/utilities/codegen.py\n@@ -695,6 +695,11 @@ def routine(self, name, expr, argument_sequence=None, global_vars=None):\n arg_list = []\n \n # setup input argument list\n+\n+ # helper to get dimensions for data for array-like args\n+ def dimensions(s):\n+ return [(S.Zero, dim - 1) for dim in s.shape]\n+\n array_symbols = {}\n for array in expressions.atoms(Indexed) | local_expressions.atoms(Indexed):\n array_symbols[array.base.label] = array\n@@ -703,11 +708,8 @@ def routine(self, name, expr, argument_sequence=None, global_vars=None):\n \n for symbol in sorted(symbols, key=str):\n if symbol in array_symbols:\n- dims = []\n array = array_symbols[symbol]\n- for dim in array.shape:\n- dims.append((S.Zero, dim - 1))\n- metadata = {'dimensions': dims}\n+ metadata = {'dimensions': dimensions(array)}\n else:\n metadata = {}\n \n@@ -739,7 +741,11 @@ def routine(self, name, expr, argument_sequence=None, global_vars=None):\n try:\n new_args.append(name_arg_dict[symbol])\n except KeyError:\n- new_args.append(InputArgument(symbol))\n+ if isinstance(symbol, (IndexedBase, MatrixSymbol)):\n+ metadata = {'dimensions': dimensions(symbol)}\n+ else:\n+ metadata = {}\n+ new_args.append(InputArgument(symbol, **metadata))\n arg_list = new_args\n \n return Routine(name, arg_list, return_val, local_vars, global_vars)\n", + "test_patch": "diff --git a/sympy/utilities/tests/test_codegen.py b/sympy/utilities/tests/test_codegen.py\n--- a/sympy/utilities/tests/test_codegen.py\n+++ b/sympy/utilities/tests/test_codegen.py\n@@ -582,6 +582,25 @@ def test_ccode_cse():\n )\n assert source == expected\n \n+def test_ccode_unused_array_arg():\n+ x = MatrixSymbol('x', 2, 1)\n+ 
# x does not appear in output\n+ name_expr = (\"test\", 1.0)\n+ generator = CCodeGen()\n+ result = codegen(name_expr, code_gen=generator, header=False, empty=False, argument_sequence=(x,))\n+ source = result[0][1]\n+ # note: x should appear as (double *)\n+ expected = (\n+ '#include \"test.h\"\\n'\n+ '#include \\n'\n+ 'double test(double *x) {\\n'\n+ ' double test_result;\\n'\n+ ' test_result = 1.0;\\n'\n+ ' return test_result;\\n'\n+ '}\\n'\n+ )\n+ assert source == expected\n+\n def test_empty_f_code():\n code_gen = FCodeGen()\n source = get_string(code_gen.dump_f95, [])\n", + "fail_to_pass": "[\"test_ccode_unused_array_arg\"]", + "pass_to_pass": "[\"test_Routine_argument_order\", \"test_empty_c_code\", \"test_empty_c_code_with_comment\", \"test_empty_c_header\", \"test_simple_c_code\", \"test_c_code_reserved_words\", \"test_numbersymbol_c_code\", \"test_c_code_argument_order\", \"test_simple_c_header\", \"test_simple_c_codegen\", \"test_multiple_results_c\", \"test_no_results_c\", \"test_ansi_math1_codegen\", \"test_ansi_math2_codegen\", \"test_complicated_codegen\", \"test_loops_c\", \"test_dummy_loops_c\", \"test_partial_loops_c\", \"test_output_arg_c\", \"test_output_arg_c_reserved_words\", \"test_ccode_results_named_ordered\", \"test_ccode_matrixsymbol_slice\", \"test_ccode_cse\", \"test_empty_f_code\", \"test_empty_f_code_with_header\", \"test_empty_f_header\", \"test_simple_f_code\", \"test_numbersymbol_f_code\", \"test_erf_f_code\", \"test_f_code_argument_order\", \"test_simple_f_header\", \"test_simple_f_codegen\", \"test_multiple_results_f\", \"test_no_results_f\", \"test_intrinsic_math_codegen\", \"test_intrinsic_math2_codegen\", \"test_complicated_codegen_f95\", \"test_loops\", \"test_dummy_loops_f95\", \"test_loops_InOut\", \"test_partial_loops_f\", \"test_output_arg_f\", \"test_inline_function\", \"test_f_code_call_signature_wrap\", \"test_check_case\", \"test_check_case_false_positive\", \"test_c_fortran_omit_routine_name\", \"test_fcode_matrix_output\", \"test_fcode_results_named_ordered\", \"test_fcode_matrixsymbol_slice\", \"test_fcode_matrixsymbol_slice_autoname\", \"test_global_vars\", \"test_custom_codegen\", \"test_c_with_printer\"]", + "expected_spans": { + "sympy/utilities/codegen.py": [ + "CodeGen.routine" + ] + }, + "test_file_spans": { + "sympy/utilities/tests/test_codegen.py": [ + "test_empty_f_code" + ] + }, + "resolved_by": [ + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/utilities/autowrap.py": [ + "imports", + "CythonCodeWrapper._prototype_arg", + "CythonCodeWrapper._declare_arg", + "CythonCodeWrapper._call_arg" + ], + "sympy/utilities/codegen.py": [ + "CCodeGen.get_prototype" + ] + }, + "alternative_spans": { + "sympy/utilities/codegen.py": [ + "CCodeGen.get_prototype" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "sympy/utilities/codegen.py": [ + "CCodeGen.get_prototype" + ] + } + } + ] + }, + { + "instance_id": "django__django-11422", + "repo": "django/django", + "base_commit": "df46b329e0900e9e4dc1d60816c1dce6dfc1094e", + "problem_statement": "Autoreloader with StatReloader doesn't track changes in manage.py.\nDescription\n\t \n\t\t(last modified by Mariusz Felisiak)\n\t \nThis is a bit convoluted, but here we go.\nEnvironment (OSX 10.11):\n$ python -V\nPython 3.6.2\n$ pip -V\npip 19.1.1\n$ pip install Django==2.2.1\nSteps to 
reproduce:\nRun a server python manage.py runserver\nEdit the manage.py file, e.g. add print(): \ndef main():\n\tprint('sth')\n\tos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ticket_30479.settings')\n\t...\nUnder 2.1.8 (and prior), this will trigger the auto-reloading mechanism. Under 2.2.1, it won't. As far as I can tell from the django.utils.autoreload log lines, it never sees the manage.py itself.\n", + "golden_patch": "diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py\n--- a/django/utils/autoreload.py\n+++ b/django/utils/autoreload.py\n@@ -114,7 +114,15 @@ def iter_modules_and_files(modules, extra_files):\n # During debugging (with PyDev) the 'typing.io' and 'typing.re' objects\n # are added to sys.modules, however they are types not modules and so\n # cause issues here.\n- if not isinstance(module, ModuleType) or getattr(module, '__spec__', None) is None:\n+ if not isinstance(module, ModuleType):\n+ continue\n+ if module.__name__ == '__main__':\n+ # __main__ (usually manage.py) doesn't always have a __spec__ set.\n+ # Handle this by falling back to using __file__, resolved below.\n+ # See https://docs.python.org/reference/import.html#main-spec\n+ sys_file_paths.append(module.__file__)\n+ continue\n+ if getattr(module, '__spec__', None) is None:\n continue\n spec = module.__spec__\n # Modules could be loaded from places without a concrete location. If\n", + "test_patch": "diff --git a/tests/utils_tests/test_autoreload.py b/tests/utils_tests/test_autoreload.py\n--- a/tests/utils_tests/test_autoreload.py\n+++ b/tests/utils_tests/test_autoreload.py\n@@ -132,6 +132,10 @@ def test_module_without_spec(self):\n del module.__spec__\n self.assertEqual(autoreload.iter_modules_and_files((module,), frozenset()), frozenset())\n \n+ def test_main_module_is_resolved(self):\n+ main_module = sys.modules['__main__']\n+ self.assertFileFound(Path(main_module.__file__))\n+\n \n class TestCommonRoots(SimpleTestCase):\n def test_common_roots(self):\n", + "fail_to_pass": "[\"test_main_module_is_resolved (utils_tests.test_autoreload.TestIterModulesAndFiles)\"]", + "pass_to_pass": "[\"test_watchman_available (utils_tests.test_autoreload.GetReloaderTests)\", \"test_watchman_unavailable (utils_tests.test_autoreload.GetReloaderTests)\", \"test_calls_start_django (utils_tests.test_autoreload.RunWithReloaderTests)\", \"test_calls_sys_exit (utils_tests.test_autoreload.RunWithReloaderTests)\", \"test_swallows_keyboard_interrupt (utils_tests.test_autoreload.RunWithReloaderTests)\", \"test_common_roots (utils_tests.test_autoreload.TestCommonRoots)\", \"test_no_exception (utils_tests.test_autoreload.TestRaiseLastException)\", \"test_raises_exception (utils_tests.test_autoreload.TestRaiseLastException)\", \"test_mutates_error_files (utils_tests.test_autoreload.TestCheckErrors)\", \"test_sys_paths_absolute (utils_tests.test_autoreload.TestSysPathDirectories)\", \"test_sys_paths_directories (utils_tests.test_autoreload.TestSysPathDirectories)\", \"test_sys_paths_non_existing (utils_tests.test_autoreload.TestSysPathDirectories)\", \"test_sys_paths_with_directories (utils_tests.test_autoreload.TestSysPathDirectories)\", \"test_manage_py (utils_tests.test_autoreload.RestartWithReloaderTests)\", \"test_python_m_django (utils_tests.test_autoreload.RestartWithReloaderTests)\", \"test_run_loop_catches_stopiteration (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_run_loop_stop_and_return (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_wait_for_apps_ready_checks_for_exception 
(utils_tests.test_autoreload.BaseReloaderTests)\", \"test_wait_for_apps_ready_without_exception (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_watch_files_with_recursive_glob (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_watch_with_glob (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_watch_with_single_file (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_watch_without_absolute (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_file (utils_tests.test_autoreload.StatReloaderTests)\", \"test_glob (utils_tests.test_autoreload.StatReloaderTests)\", \"test_glob_recursive (utils_tests.test_autoreload.StatReloaderTests)\", \"test_multiple_globs (utils_tests.test_autoreload.StatReloaderTests)\", \"test_multiple_recursive_globs (utils_tests.test_autoreload.StatReloaderTests)\", \"test_nested_glob_recursive (utils_tests.test_autoreload.StatReloaderTests)\", \"test_overlapping_glob_recursive (utils_tests.test_autoreload.StatReloaderTests)\", \"test_overlapping_globs (utils_tests.test_autoreload.StatReloaderTests)\", \"test_snapshot_files_ignores_missing_files (utils_tests.test_autoreload.StatReloaderTests)\", \"test_snapshot_files_updates (utils_tests.test_autoreload.StatReloaderTests)\", \"test_snapshot_files_with_duplicates (utils_tests.test_autoreload.StatReloaderTests)\", \"test_check_errors_called (utils_tests.test_autoreload.StartDjangoTests)\", \"test_echo_on_called (utils_tests.test_autoreload.StartDjangoTests)\", \"test_starts_thread_with_args (utils_tests.test_autoreload.StartDjangoTests)\", \"test_watchman_becomes_unavailable (utils_tests.test_autoreload.StartDjangoTests)\", \".pyc and .pyo files are included in the files list.\", \"test_check_errors (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_check_errors_catches_all_exceptions (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_file_added (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_module_without_spec (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_paths_are_pathlib_instances (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"iter_all_python_module_file() ignores weakref modules.\", \"test_zip_reload (utils_tests.test_autoreload.TestIterModulesAndFiles)\"]", + "expected_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "test_file_spans": { + "tests/utils_tests/test_autoreload.py": [ + "TestIterModulesAndFiles" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/utils/autoreload.py": [ + "imports", + "check_errors", + "iter_all_python_module_files", + "iter_modules_and_files", + "common_roots", + "sys_path_directories", + "get_child_arguments", + "BaseReloader.__init__", + "BaseReloader.wait_for_apps_ready", + "BaseReloader.run", 
+ "BaseReloader.check_availability", + "BaseReloader.notify_file_changed", + "StatReloader", + "StatReloader.tick", + "StatReloader.snapshot_files", + "WatchmanReloader.__init__", + "WatchmanReloader._watch_root", + "WatchmanReloader._subscribe", + "WatchmanReloader._subscribe_dir", + "WatchmanReloader._watch_glob", + "WatchmanReloader._update_watches", + "WatchmanReloader.update_watches", + "WatchmanReloader._check_subscription", + "WatchmanReloader.check_availability", + "start_django", + "run_with_reloader" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "imports", + "check_errors", + "iter_all_python_module_files", + "iter_modules_and_files", + "common_roots", + "sys_path_directories", + "get_child_arguments", + "BaseReloader.__init__", + "BaseReloader.wait_for_apps_ready", + "BaseReloader.run", + "BaseReloader.check_availability", + "BaseReloader.notify_file_changed", + "StatReloader", + "StatReloader.tick", + "StatReloader.snapshot_files", + "WatchmanReloader.__init__", + "WatchmanReloader._watch_root", + "WatchmanReloader._subscribe", + "WatchmanReloader._subscribe_dir", + "WatchmanReloader._watch_glob", + "WatchmanReloader._update_watches", + "WatchmanReloader.update_watches", + "WatchmanReloader._check_subscription", + "WatchmanReloader.check_availability", + "start_django", + "run_with_reloader" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ], + "tests/utils_tests/test_autoreload.py": [ + "IntegrationTests.test_file" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "run_name": "20240630_agentless_gpt4o", + "spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "run_name": "20240808_RepoGraph_gpt4o", + "spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + 
"run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "django/utils/autoreload.py": [ + "iter_all_python_module_files" + ] + } + } + ] + }, + { + "instance_id": "pytest-dev__pytest-5413", + "repo": "pytest-dev/pytest", + "base_commit": "450d2646233c670654744d3d24330b69895bb9d2", + "problem_statement": "str() on the pytest.raises context variable doesn't behave same as normal exception catch\nPytest 4.6.2, macOS 10.14.5\r\n\r\n```Python\r\ntry:\r\n raise LookupError(\r\n f\"A\\n\"\r\n f\"B\\n\"\r\n f\"C\"\r\n )\r\nexcept LookupError as e:\r\n print(str(e))\r\n```\r\nprints\r\n\r\n> A\r\n> B\r\n> C\r\n\r\nBut\r\n\r\n```Python\r\nwith pytest.raises(LookupError) as e:\r\n raise LookupError(\r\n f\"A\\n\"\r\n f\"B\\n\"\r\n f\"C\"\r\n )\r\n\r\nprint(str(e))\r\n```\r\n\r\nprints\r\n\r\n> :3: LookupError: A\r\n\r\nIn order to get the full error message, one must do `str(e.value)`, which is documented, but this is a different interaction. Any chance the behavior could be changed to eliminate this gotcha?\r\n\r\n-----\r\n\r\nPip list gives\r\n\r\n```\r\nPackage Version Location\r\n------------------ -------- ------------------------------------------------------\r\napipkg 1.5\r\nasn1crypto 0.24.0\r\natomicwrites 1.3.0\r\nattrs 19.1.0\r\naws-xray-sdk 0.95\r\nboto 2.49.0\r\nboto3 1.9.51\r\nbotocore 1.12.144\r\ncertifi 2019.3.9\r\ncffi 1.12.3\r\nchardet 3.0.4\r\nClick 7.0\r\ncodacy-coverage 1.3.11\r\ncolorama 0.4.1\r\ncoverage 4.5.3\r\ncryptography 2.6.1\r\ndecorator 4.4.0\r\ndocker 3.7.2\r\ndocker-pycreds 0.4.0\r\ndocutils 0.14\r\necdsa 0.13.2\r\nexecnet 1.6.0\r\nfuture 0.17.1\r\nidna 2.8\r\nimportlib-metadata 0.17\r\nipaddress 1.0.22\r\nJinja2 2.10.1\r\njmespath 0.9.4\r\njsondiff 1.1.1\r\njsonpickle 1.1\r\njsonschema 2.6.0\r\nMarkupSafe 1.1.1\r\nmock 3.0.4\r\nmore-itertools 7.0.0\r\nmoto 1.3.7\r\nneobolt 1.7.10\r\nneotime 1.7.4\r\nnetworkx 2.1\r\nnumpy 1.15.0\r\npackaging 19.0\r\npandas 0.24.2\r\npip 19.1.1\r\npluggy 0.12.0\r\nprompt-toolkit 2.0.9\r\npy 1.8.0\r\npy2neo 4.2.0\r\npyaml 19.4.1\r\npycodestyle 2.5.0\r\npycparser 2.19\r\npycryptodome 3.8.1\r\nPygments 2.3.1\r\npyOpenSSL 19.0.0\r\npyparsing 2.4.0\r\npytest 4.6.2\r\npytest-cache 1.0\r\npytest-codestyle 1.4.0\r\npytest-cov 2.6.1\r\npytest-forked 1.0.2\r\npython-dateutil 2.7.3\r\npython-jose 2.0.2\r\npytz 2018.5\r\nPyYAML 5.1\r\nrequests 2.21.0\r\nrequests-mock 1.5.2\r\nresponses 0.10.6\r\ns3transfer 0.1.13\r\nsetuptools 41.0.1\r\nsix 1.11.0\r\nsqlite3worker 1.1.7\r\ntabulate 0.8.3\r\nurllib3 1.24.3\r\nwcwidth 0.1.7\r\nwebsocket-client 0.56.0\r\nWerkzeug 0.15.2\r\nwheel 0.33.1\r\nwrapt 1.11.1\r\nxlrd 1.1.0\r\nxmltodict 0.12.0\r\nzipp 0.5.1\r\n```\n", + "golden_patch": "diff --git a/src/_pytest/_code/code.py b/src/_pytest/_code/code.py\n--- a/src/_pytest/_code/code.py\n+++ b/src/_pytest/_code/code.py\n@@ -534,13 +534,6 @@ def getrepr(\n )\n return fmt.repr_excinfo(self)\n \n- def __str__(self):\n- if self._excinfo is None:\n- return repr(self)\n- entry = self.traceback[-1]\n- loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())\n- return str(loc)\n-\n def match(self, regexp):\n \"\"\"\n Check whether the regular expression 'regexp' is found in the string\n", + "test_patch": "diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py\n--- a/testing/code/test_excinfo.py\n+++ b/testing/code/test_excinfo.py\n@@ -333,18 +333,10 @@ def 
test_excinfo_exconly():\n assert msg.endswith(\"world\")\n \n \n-def test_excinfo_repr():\n+def test_excinfo_repr_str():\n excinfo = pytest.raises(ValueError, h)\n- s = repr(excinfo)\n- assert s == \"\"\n-\n-\n-def test_excinfo_str():\n- excinfo = pytest.raises(ValueError, h)\n- s = str(excinfo)\n- assert s.startswith(__file__[:-9]) # pyc file and $py.class\n- assert s.endswith(\"ValueError\")\n- assert len(s.split(\":\")) >= 3 # on windows it's 4\n+ assert repr(excinfo) == \"\"\n+ assert str(excinfo) == \"\"\n \n \n def test_excinfo_for_later():\n", + "fail_to_pass": "[\"testing/code/test_excinfo.py::test_excinfo_repr_str\"]", + "pass_to_pass": "[\"testing/code/test_excinfo.py::test_excinfo_simple\", \"testing/code/test_excinfo.py::test_excinfo_getstatement\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_entries\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_entry_getsource\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_entry_getsource_in_construct\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_cut\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_filter\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_filter_selective[-True]\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_filter_selective[-False]\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_filter_selective[tracebackhide2-True]\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_filter_selective[tracebackhide3-False]\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_recursion_index\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_only_specific_recursion_errors\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_no_recursion_index\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_getcrashentry\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_getcrashentry_empty\", \"testing/code/test_excinfo.py::test_excinfo_exconly\", \"testing/code/test_excinfo.py::test_excinfo_for_later\", \"testing/code/test_excinfo.py::test_excinfo_errisinstance\", \"testing/code/test_excinfo.py::test_excinfo_no_sourcecode\", \"testing/code/test_excinfo.py::test_entrysource_Queue_example\", \"testing/code/test_excinfo.py::test_codepath_Queue_example\", \"testing/code/test_excinfo.py::test_match_succeeds\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source_excinfo\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source_not_existing\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_many_line_source_not_existing\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source_failing_fullsource\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_local\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_local_with_error\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_local_with_exception_in_class_property\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_local_truncated\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_tracebackentry_lines\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_tracebackentry_lines2\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_tracebackentry_lines_var_kw_args\", 
\"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_tracebackentry_short\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_tracebackentry_no\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_traceback_tbfilter\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_traceback_short_no_source\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_traceback_and_excinfo\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_traceback_with_invalid_cwd\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_excinfo_addouterr\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_excinfo_reprcrash\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_traceback_recursion\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_reprexcinfo_getrepr\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_reprexcinfo_unicode\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_toterminal_long\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_toterminal_long_missing_source\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_toterminal_long_incomplete_source\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_toterminal_long_filenames\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions0]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions1]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions2]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions3]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions4]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions5]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions6]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions7]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions8]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions9]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions10]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions11]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions12]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions13]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions14]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions15]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions16]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions17]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions18]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions19]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions20]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions21]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions22]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_format_excinfo[reproptions23]\", 
\"testing/code/test_excinfo.py::TestFormattedExcinfo::test_traceback_repr_style\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_exc_chain_repr\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_exc_repr_chain_suppression[from_none]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_exc_repr_chain_suppression[explicit_suppress]\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_exc_chain_repr_without_traceback[cause-The\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_exc_chain_repr_without_traceback[context-During\", \"testing/code/test_excinfo.py::TestFormattedExcinfo::test_exc_chain_repr_cycle\", \"testing/code/test_excinfo.py::test_repr_traceback_with_unicode[None-short]\", \"testing/code/test_excinfo.py::test_repr_traceback_with_unicode[None-long]\", \"testing/code/test_excinfo.py::test_repr_traceback_with_unicode[utf8-short]\", \"testing/code/test_excinfo.py::test_repr_traceback_with_unicode[utf8-long]\", \"testing/code/test_excinfo.py::test_repr_traceback_with_unicode[utf16-short]\", \"testing/code/test_excinfo.py::test_repr_traceback_with_unicode[utf16-long]\", \"testing/code/test_excinfo.py::test_exception_repr_extraction_error_on_recursion\", \"testing/code/test_excinfo.py::test_no_recursion_index_on_recursion_error\", \"testing/code/test_excinfo.py::TestTraceback_f_g_h::test_traceback_cut_excludepath\", \"testing/code/test_excinfo.py::test_match_raises_error\", \"testing/code/test_excinfo.py::test_cwd_deleted\"]", + "expected_spans": { + "src/_pytest/_code/code.py": [ + "ExceptionInfo.__str__" + ] + }, + "test_file_spans": { + "testing/code/test_excinfo.py": [ + "test_excinfo_repr", + "test_excinfo_str" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-16988", + "repo": "sympy/sympy", + "base_commit": "e727339af6dc22321b00f52d971cda39e4ce89fb", + "problem_statement": "Intersection should remove duplicates\n```python\r\n>>> Intersection({1},{1},{x})\r\nEmptySet()\r\n>>> Intersection({1},{x})\r\n{1}\r\n```\r\nThe answer should be `Piecewise(({1}, Eq(x, 1)), (S.EmptySet, True))` or remain unevaluated.\r\n\r\nThe routine should give the same answer if duplicates are present; my initial guess is that duplicates should just be removed at the outset of instantiation. 
Ordering them will produce canonical processing.\n", + "golden_patch": "diff --git a/sympy/sets/sets.py b/sympy/sets/sets.py\n--- a/sympy/sets/sets.py\n+++ b/sympy/sets/sets.py\n@@ -1260,7 +1260,7 @@ def __new__(cls, *args, **kwargs):\n evaluate = kwargs.get('evaluate', global_evaluate[0])\n \n # flatten inputs to merge intersections and iterables\n- args = _sympify(args)\n+ args = list(ordered(set(_sympify(args))))\n \n # Reduce sets using known rules\n if evaluate:\n", + "test_patch": "diff --git a/sympy/sets/tests/test_sets.py b/sympy/sets/tests/test_sets.py\n--- a/sympy/sets/tests/test_sets.py\n+++ b/sympy/sets/tests/test_sets.py\n@@ -21,7 +21,7 @@ def test_imageset():\n assert imageset(x, abs(x), S.Integers) is S.Naturals0\n # issue 16878a\n r = symbols('r', real=True)\n- assert (1, r) not in imageset(x, (x, x), S.Reals)\n+ assert (1, r) in imageset(x, (x, x), S.Reals) != False\n assert (r, r) in imageset(x, (x, x), S.Reals)\n assert 1 + I in imageset(x, x + I, S.Reals)\n assert {1} not in imageset(x, (x,), S.Reals)\n@@ -342,6 +342,9 @@ def test_intersection():\n # issue 12178\n assert Intersection() == S.UniversalSet\n \n+ # issue 16987\n+ assert Intersection({1}, {1}, {x}) == Intersection({1}, {x})\n+\n \n def test_issue_9623():\n n = Symbol('n')\n", + "fail_to_pass": "[\"test_imageset\", \"test_intersection\"]", + "pass_to_pass": "[\"test_interval_arguments\", \"test_interval_symbolic_end_points\", \"test_union\", \"test_union_iter\", \"test_difference\", \"test_Complement\", \"test_complement\", \"test_intersect1\", \"test_issue_9623\", \"test_is_disjoint\", \"test_ProductSet_of_single_arg_is_arg\", \"test_interval_subs\", \"test_interval_to_mpi\", \"test_measure\", \"test_is_subset\", \"test_is_proper_subset\", \"test_is_superset\", \"test_is_proper_superset\", \"test_contains\", \"test_interval_symbolic\", \"test_union_contains\", \"test_is_number\", \"test_Interval_is_left_unbounded\", \"test_Interval_is_right_unbounded\", \"test_Interval_as_relational\", \"test_Finite_as_relational\", \"test_Union_as_relational\", \"test_Intersection_as_relational\", \"test_EmptySet\", \"test_finite_basic\", \"test_powerset\", \"test_product_basic\", \"test_real\", \"test_supinf\", \"test_universalset\", \"test_Union_of_ProductSets_shares\", \"test_Interval_free_symbols\", \"test_image_interval\", \"test_image_piecewise\", \"test_image_FiniteSet\", \"test_image_Union\", \"test_image_EmptySet\", \"test_issue_5724_7680\", \"test_boundary\", \"test_boundary_Union\", \"test_boundary_ProductSet\", \"test_boundary_ProductSet_line\", \"test_is_open\", \"test_is_closed\", \"test_closure\", \"test_interior\", \"test_issue_7841\", \"test_Eq\", \"test_SymmetricDifference\", \"test_issue_9536\", \"test_issue_9637\", \"test_issue_9956\", \"test_issue_Symbol_inter\", \"test_issue_11827\", \"test_issue_10113\", \"test_issue_10248\", \"test_issue_9447\", \"test_issue_10337\", \"test_issue_10326\", \"test_issue_2799\", \"test_issue_9706\", \"test_issue_8257\", \"test_issue_10931\", \"test_issue_11174\", \"test_finite_set_intersection\", \"test_union_intersection_constructor\", \"test_Union_contains\"]", + "expected_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + }, + "test_file_spans": { + "sympy/sets/tests/test_sets.py": [ + "test_imageset", + "test_intersection" + ] + }, + "resolved_by": [ + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + }, + "alternative_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + } + 
}, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/sets/sets.py": [ + "simplify_union", + "simplify_intersection" + ] + }, + "alternative_spans": { + "sympy/sets/sets.py": [ + "simplify_union", + "simplify_intersection" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + }, + "alternative_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + }, + "alternative_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + }, + "alternative_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + }, + "alternative_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + }, + "alternative_spans": { + "sympy/sets/sets.py": [ + "Intersection.__new__" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240820_honeycomb", + "spans": { + "sympy/sets/sets.py": [ + "simplify_union", + "simplify_intersection" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-17022", + "repo": "sympy/sympy", + "base_commit": "f91de695585c1fbc7d4f49ee061f64fcb1c2c4d8", + "problem_statement": "Lambdify misinterprets some matrix expressions\nUsing lambdify on an expression containing an identity matrix gives us an unexpected result:\r\n\r\n```python\r\n>>> import numpy as np\r\n>>> n = symbols('n', integer=True)\r\n>>> A = MatrixSymbol(\"A\", n, n)\r\n>>> a = np.array([[1, 2], [3, 4]])\r\n>>> f = lambdify(A, A + Identity(n))\r\n>>> f(a)\r\narray([[1.+1.j, 2.+1.j],\r\n [3.+1.j, 4.+1.j]])\r\n```\r\n\r\nInstead, the output should be `array([[2, 2], [3, 5]])`, since we're adding an identity matrix to the array. Inspecting the globals and source code of `f` shows us why we get the result:\r\n\r\n```python\r\n>>> import inspect\r\n>>> print(inspect.getsource(f))\r\ndef _lambdifygenerated(A):\r\n return (I + A)\r\n>>> f.__globals__['I']\r\n1j\r\n```\r\n\r\nThe code printer prints `I`, which is currently being interpreted as a Python built-in complex number. 
The printer should support printing identity matrices, and signal an error for unsupported expressions that might be misinterpreted.\n", + "golden_patch": "diff --git a/sympy/printing/pycode.py b/sympy/printing/pycode.py\n--- a/sympy/printing/pycode.py\n+++ b/sympy/printing/pycode.py\n@@ -608,6 +608,13 @@ def _print_MatrixBase(self, expr):\n func = self._module_format('numpy.array')\n return \"%s(%s)\" % (func, self._print(expr.tolist()))\n \n+ def _print_Identity(self, expr):\n+ shape = expr.shape\n+ if all([dim.is_Integer for dim in shape]):\n+ return \"%s(%s)\" % (self._module_format('numpy.eye'), self._print(expr.shape[0]))\n+ else:\n+ raise NotImplementedError(\"Symbolic matrix dimensions are not yet supported for identity matrices\")\n+\n def _print_BlockMatrix(self, expr):\n return '{0}({1})'.format(self._module_format('numpy.block'),\n self._print(expr.args[0].tolist()))\n", + "test_patch": "diff --git a/sympy/printing/tests/test_numpy.py b/sympy/printing/tests/test_numpy.py\n--- a/sympy/printing/tests/test_numpy.py\n+++ b/sympy/printing/tests/test_numpy.py\n@@ -1,6 +1,6 @@\n from sympy import (\n Piecewise, lambdify, Equality, Unequality, Sum, Mod, cbrt, sqrt,\n- MatrixSymbol, BlockMatrix\n+ MatrixSymbol, BlockMatrix, Identity\n )\n from sympy import eye\n from sympy.abc import x, i, j, a, b, c, d\n@@ -11,7 +11,7 @@\n from sympy.printing.lambdarepr import NumPyPrinter\n \n from sympy.utilities.pytest import warns_deprecated_sympy\n-from sympy.utilities.pytest import skip\n+from sympy.utilities.pytest import skip, raises\n from sympy.external import import_module\n \n np = import_module('numpy')\n@@ -252,3 +252,21 @@ def test_16857():\n \n printer = NumPyPrinter()\n assert printer.doprint(A) == 'numpy.block([[a_1, a_2], [a_3, a_4]])'\n+\n+\n+def test_issue_17006():\n+ if not np:\n+ skip(\"NumPy not installed\")\n+\n+ M = MatrixSymbol(\"M\", 2, 2)\n+\n+ f = lambdify(M, M + Identity(2))\n+ ma = np.array([[1, 2], [3, 4]])\n+ mr = np.array([[2, 2], [3, 5]])\n+\n+ assert (f(ma) == mr).all()\n+\n+ from sympy import symbols\n+ n = symbols('n', integer=True)\n+ N = MatrixSymbol(\"M\", n, n)\n+ raises(NotImplementedError, lambda: lambdify(N, N + Identity(n)))\ndiff --git a/sympy/printing/tests/test_pycode.py b/sympy/printing/tests/test_pycode.py\n--- a/sympy/printing/tests/test_pycode.py\n+++ b/sympy/printing/tests/test_pycode.py\n@@ -7,7 +7,7 @@\n from sympy.core.numbers import pi\n from sympy.functions import acos, Piecewise, sign\n from sympy.logic import And, Or\n-from sympy.matrices import SparseMatrix, MatrixSymbol\n+from sympy.matrices import SparseMatrix, MatrixSymbol, Identity\n from sympy.printing.pycode import (\n MpmathPrinter, NumPyPrinter, PythonCodePrinter, pycode, SciPyPrinter\n )\n@@ -49,6 +49,7 @@ def test_NumPyPrinter():\n A = MatrixSymbol(\"A\", 2, 2)\n assert p.doprint(A**(-1)) == \"numpy.linalg.inv(A)\"\n assert p.doprint(A**5) == \"numpy.linalg.matrix_power(A, 5)\"\n+ assert p.doprint(Identity(3)) == \"numpy.eye(3)\"\n \n \n def test_SciPyPrinter():\n", + "fail_to_pass": "[\"test_NumPyPrinter\"]", + "pass_to_pass": "[\"test_numpy_piecewise_regression\", \"test_PythonCodePrinter\", \"test_MpmathPrinter\", \"test_SciPyPrinter\", \"test_pycode_reserved_words\", \"test_printmethod\", \"test_codegen_ast_nodes\", \"test_issue_14283\"]", + "expected_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_BlockMatrix" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_numpy.py": [ + "imports" + ], + "sympy/printing/tests/test_pycode.py": [ + "imports", + 
"test_NumPyPrinter" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sympy/printing/pycode.py": [ + "AbstractPythonCodePrinter._print_MatrixBase" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "AbstractPythonCodePrinter._print_MatrixBase" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ], + "sympy/utilities/lambdify.py": [ + "impl:35" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/printing/lambdarepr.py": [ + "LambdaPrinter" + ], + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/printing/pycode.py": [ + "AbstractPythonCodePrinter._print_MatrixBase", + "NumPyPrinter._print_MatrixBase" + ], + "sympy/utilities/lambdify.py": [ + "lambdify" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "AbstractPythonCodePrinter._print_MatrixBase", + "NumPyPrinter._print_MatrixBase" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_MatPow" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_MatPow" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ], + "sympy/utilities/lambdify.py": [ + "lambdify" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_MatPow" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_MatPow" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_MatrixBase" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_MatrixBase" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_CodegenArrayElementwiseAdd", + "impl:35" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_CodegenArrayElementwiseAdd", + "impl:35" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/printing/pycode.py": [ + "PythonCodePrinter._print_sign" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "PythonCodePrinter._print_sign" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/matrices/expressions/tests/test_matadd.py": [], + "sympy/printing/lambdarepr.py": [ + "LambdaPrinter" + ], + "sympy/printing/pycode.py": [ + "AbstractPythonCodePrinter._print_MatrixBase", + "impl:35" + ], + "sympy/utilities/lambdify.py": [ + "imports" + ] + }, + "alternative_spans": { + "sympy/printing/pycode.py": [ + "AbstractPythonCodePrinter._print_MatrixBase", + "impl:35" + ] + } + } + ], + 
"alternative_spans": [ + { + "run_name": "20240630_agentless_gpt4o", + "spans": { + "sympy/printing/pycode.py": [ + "AbstractPythonCodePrinter._print_MatrixBase" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + } + }, + { + "run_name": "20240706_sima_gpt4o", + "spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + } + }, + { + "run_name": "20240820_honeycomb", + "spans": { + "sympy/printing/pycode.py": [ + "AbstractPythonCodePrinter._print_MatrixBase", + "NumPyPrinter._print_MatrixBase" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_MatPow" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_MatPow" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_MatrixBase" + ] + } + }, + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter" + ] + } + }, + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "sympy/printing/pycode.py": [ + "NumPyPrinter._print_CodegenArrayElementwiseAdd", + "impl:35" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "sympy/printing/pycode.py": [ + "PythonCodePrinter._print_sign" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sympy/printing/pycode.py": [ + "AbstractPythonCodePrinter._print_MatrixBase", + "impl:35" + ] + } + } + ] + }, + { + "instance_id": "scikit-learn__scikit-learn-14087", + "repo": "scikit-learn/scikit-learn", + "base_commit": "a5743ed36fbd3fbc8e351bdab16561fbfca7dfa1", + "problem_statement": "IndexError thrown with LogisticRegressionCV and refit=False\n#### Description\r\nThe following error is thrown when trying to estimate a regularization parameter via cross-validation, *without* refitting.\r\n\r\n#### Steps/Code to Reproduce\r\n```python\r\nimport sys\r\nimport sklearn\r\nfrom sklearn.linear_model import LogisticRegressionCV\r\nimport numpy as np\r\n\r\nnp.random.seed(29)\r\nX = np.random.normal(size=(1000, 3))\r\nbeta = np.random.normal(size=3)\r\nintercept = np.random.normal(size=None)\r\ny = np.sign(intercept + X @ beta)\r\n\r\nLogisticRegressionCV(\r\ncv=5,\r\nsolver='saga', # same error with 'liblinear'\r\ntol=1e-2,\r\nrefit=False).fit(X, y)\r\n```\r\n\r\n\r\n#### Expected Results\r\nNo error is thrown. 
\r\n\r\n#### Actual Results\r\n```\r\n---------------------------------------------------------------------------\r\nIndexError Traceback (most recent call last)\r\n in \r\n----> 1 LogisticRegressionCV(refit=False).fit(X, y)\r\n\r\n~/.pyenv/versions/3.6.7/envs/jupyter/lib/python3.6/site-packages/sklearn/linear_model/logistic.py in fit(self, X, y, sample_weight)\r\n 2192 else:\r\n 2193 w = np.mean([coefs_paths[:, i, best_indices[i], :]\r\n-> 2194 for i in range(len(folds))], axis=0)\r\n 2195 \r\n 2196 best_indices_C = best_indices % len(self.Cs_)\r\n\r\n~/.pyenv/versions/3.6.7/envs/jupyter/lib/python3.6/site-packages/sklearn/linear_model/logistic.py in (.0)\r\n 2192 else:\r\n 2193 w = np.mean([coefs_paths[:, i, best_indices[i], :]\r\n-> 2194 for i in range(len(folds))], axis=0)\r\n 2195 \r\n 2196 best_indices_C = best_indices % len(self.Cs_)\r\n\r\nIndexError: too many indices for array\r\n```\r\n\r\n#### Versions\r\n```\r\nSystem:\r\n python: 3.6.7 (default, May 13 2019, 16:14:45) [GCC 4.2.1 Compatible Apple LLVM 10.0.1 (clang-1001.0.46.4)]\r\nexecutable: /Users/tsweetser/.pyenv/versions/3.6.7/envs/jupyter/bin/python\r\n machine: Darwin-18.6.0-x86_64-i386-64bit\r\n\r\nBLAS:\r\n macros: NO_ATLAS_INFO=3, HAVE_CBLAS=None\r\n lib_dirs: \r\ncblas_libs: cblas\r\n\r\nPython deps:\r\n pip: 19.1.1\r\nsetuptools: 39.0.1\r\n sklearn: 0.21.2\r\n numpy: 1.15.1\r\n scipy: 1.1.0\r\n Cython: 0.29.6\r\n pandas: 0.24.2\r\n```\n", + "golden_patch": "diff --git a/sklearn/linear_model/logistic.py b/sklearn/linear_model/logistic.py\n--- a/sklearn/linear_model/logistic.py\n+++ b/sklearn/linear_model/logistic.py\n@@ -2170,7 +2170,7 @@ def fit(self, X, y, sample_weight=None):\n # Take the best scores across every fold and the average of\n # all coefficients corresponding to the best scores.\n best_indices = np.argmax(scores, axis=1)\n- if self.multi_class == 'ovr':\n+ if multi_class == 'ovr':\n w = np.mean([coefs_paths[i, best_indices[i], :]\n for i in range(len(folds))], axis=0)\n else:\n@@ -2180,8 +2180,11 @@ def fit(self, X, y, sample_weight=None):\n best_indices_C = best_indices % len(self.Cs_)\n self.C_.append(np.mean(self.Cs_[best_indices_C]))\n \n- best_indices_l1 = best_indices // len(self.Cs_)\n- self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))\n+ if self.penalty == 'elasticnet':\n+ best_indices_l1 = best_indices // len(self.Cs_)\n+ self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1]))\n+ else:\n+ self.l1_ratio_.append(None)\n \n if multi_class == 'multinomial':\n self.C_ = np.tile(self.C_, n_classes)\n", + "test_patch": "diff --git a/sklearn/linear_model/tests/test_logistic.py b/sklearn/linear_model/tests/test_logistic.py\n--- a/sklearn/linear_model/tests/test_logistic.py\n+++ b/sklearn/linear_model/tests/test_logistic.py\n@@ -1532,8 +1532,9 @@ def test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr():\n assert (lrcv.predict(X_test) == gs.predict(X_test)).mean() >= .8\n \n \n-@pytest.mark.parametrize('multi_class', ('ovr', 'multinomial'))\n-def test_LogisticRegressionCV_no_refit(multi_class):\n+@pytest.mark.parametrize('penalty', ('l2', 'elasticnet'))\n+@pytest.mark.parametrize('multi_class', ('ovr', 'multinomial', 'auto'))\n+def test_LogisticRegressionCV_no_refit(penalty, multi_class):\n # Test LogisticRegressionCV attribute shapes when refit is False\n \n n_classes = 3\n@@ -1543,9 +1544,12 @@ def test_LogisticRegressionCV_no_refit(multi_class):\n random_state=0)\n \n Cs = np.logspace(-4, 4, 3)\n- l1_ratios = np.linspace(0, 1, 2)\n+ if penalty == 'elasticnet':\n+ l1_ratios = 
np.linspace(0, 1, 2)\n+ else:\n+ l1_ratios = None\n \n- lrcv = LogisticRegressionCV(penalty='elasticnet', Cs=Cs, solver='saga',\n+ lrcv = LogisticRegressionCV(penalty=penalty, Cs=Cs, solver='saga',\n l1_ratios=l1_ratios, random_state=0,\n multi_class=multi_class, refit=False)\n lrcv.fit(X, y)\n", + "fail_to_pass": "[\"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_no_refit[ovr-l2]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_no_refit[multinomial-l2]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_no_refit[auto-l2]\"]", + "pass_to_pass": "[\"sklearn/linear_model/tests/test_logistic.py::test_predict_2_classes\", \"sklearn/linear_model/tests/test_logistic.py::test_error\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_cv_mock_scorer\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_cv_score_does_not_warn_by_default\", \"sklearn/linear_model/tests/test_logistic.py::test_lr_liblinear_warning\", \"sklearn/linear_model/tests/test_logistic.py::test_predict_3_classes\", \"sklearn/linear_model/tests/test_logistic.py::test_predict_iris\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_validation[lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_validation[newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_validation[sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_validation[saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_check_solver_option[LogisticRegression]\", \"sklearn/linear_model/tests/test_logistic.py::test_check_solver_option[LogisticRegressionCV]\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_binary[lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_binary[newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_binary[sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_binary[saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_binary_probabilities\", \"sklearn/linear_model/tests/test_logistic.py::test_sparsify\", \"sklearn/linear_model/tests/test_logistic.py::test_inconsistent_input\", \"sklearn/linear_model/tests/test_logistic.py::test_write_parameters\", \"sklearn/linear_model/tests/test_logistic.py::test_nan\", \"sklearn/linear_model/tests/test_logistic.py::test_consistency_path\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_path_convergence_fail\", \"sklearn/linear_model/tests/test_logistic.py::test_liblinear_dual_random_state\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_loss_and_grad\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_grad_hess\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_cv\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_cv_multinomial_score[accuracy-multiclass_agg_list0]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_cv_multinomial_score[precision-multiclass_agg_list1]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_cv_multinomial_score[f1-multiclass_agg_list2]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_cv_multinomial_score[neg_log_loss-multiclass_agg_list3]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_cv_multinomial_score[recall-multiclass_agg_list4]\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_logistic_regression_string_inputs\", 
\"sklearn/linear_model/tests/test_logistic.py::test_logistic_cv_sparse\", \"sklearn/linear_model/tests/test_logistic.py::test_intercept_logistic_helper\", \"sklearn/linear_model/tests/test_logistic.py::test_ovr_multinomial_iris\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_solvers\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_solvers_multiclass\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regressioncv_class_weights\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_sample_weights\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_class_weights\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multinomial\", \"sklearn/linear_model/tests/test_logistic.py::test_multinomial_grad_hess\", \"sklearn/linear_model/tests/test_logistic.py::test_liblinear_decision_function_zero\", \"sklearn/linear_model/tests/test_logistic.py::test_liblinear_logregcv_sparse\", \"sklearn/linear_model/tests/test_logistic.py::test_saga_sparse\", \"sklearn/linear_model/tests/test_logistic.py::test_logreg_intercept_scaling\", \"sklearn/linear_model/tests/test_logistic.py::test_logreg_intercept_scaling_zero\", \"sklearn/linear_model/tests/test_logistic.py::test_logreg_l1\", \"sklearn/linear_model/tests/test_logistic.py::test_logreg_l1_sparse_data\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_cv_refit[l1-42]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_cv_refit[l2-42]\", \"sklearn/linear_model/tests/test_logistic.py::test_logreg_predict_proba_multinomial\", \"sklearn/linear_model/tests/test_logistic.py::test_max_iter\", \"sklearn/linear_model/tests/test_logistic.py::test_n_iter[newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_n_iter[liblinear]\", \"sklearn/linear_model/tests/test_logistic.py::test_n_iter[sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_n_iter[saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_n_iter[lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-True-True-newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-True-True-sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-True-True-saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-True-True-lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-True-False-newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-True-False-sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-True-False-saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-True-False-lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-False-True-newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-False-True-sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-False-True-saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-False-True-lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-False-False-newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-False-False-sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-False-False-saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[ovr-False-False-lbfgs]\", 
\"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-True-True-newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-True-True-sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-True-True-saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-True-True-lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-True-False-newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-True-False-sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-True-False-saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-True-False-lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-False-True-newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-False-True-sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-False-True-saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-False-True-lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-False-False-newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-False-False-sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-False-False-saga]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start[multinomial-False-False-lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_saga_vs_liblinear\", \"sklearn/linear_model/tests/test_logistic.py::test_dtype_match[newton-cg-ovr]\", \"sklearn/linear_model/tests/test_logistic.py::test_dtype_match[newton-cg-multinomial]\", \"sklearn/linear_model/tests/test_logistic.py::test_dtype_match[saga-ovr]\", \"sklearn/linear_model/tests/test_logistic.py::test_dtype_match[saga-multinomial]\", \"sklearn/linear_model/tests/test_logistic.py::test_warm_start_converge_LR\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_coeffs\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l1-1-0.001]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l1-1-0.1]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l1-1-1]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l1-1-10]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l1-1-100]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l1-1-1000]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l1-1-1000000.0]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l2-0-0.001]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l2-0-0.1]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l2-0-1]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l2-0-10]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l2-0-100]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l2-0-1000]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_l1_l2_equivalence[l2-0-1000000.0]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_vs_l1_l2[0.001]\", 
\"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_vs_l1_l2[1]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_vs_l1_l2[100]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_vs_l1_l2[1000000.0]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.1-0.001]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.1-0.046415888336127795]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.1-2.1544346900318843]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.1-100.0]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.5-0.001]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.5-0.046415888336127795]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.5-2.1544346900318843]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.5-100.0]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.9-0.001]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.9-0.046415888336127795]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.9-2.1544346900318843]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegression_elastic_net_objective[0.9-100.0]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_GridSearchCV_elastic_net[ovr]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_GridSearchCV_elastic_net[multinomial]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_GridSearchCV_elastic_net_ovr\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_no_refit[ovr-elasticnet]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_no_refit[multinomial-elasticnet]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_no_refit[auto-elasticnet]\", \"sklearn/linear_model/tests/test_logistic.py::test_LogisticRegressionCV_elasticnet_attribute_shapes\", \"sklearn/linear_model/tests/test_logistic.py::test_l1_ratio_param[-1]\", \"sklearn/linear_model/tests/test_logistic.py::test_l1_ratio_param[2]\", \"sklearn/linear_model/tests/test_logistic.py::test_l1_ratio_param[None]\", \"sklearn/linear_model/tests/test_logistic.py::test_l1_ratio_param[something_wrong]\", \"sklearn/linear_model/tests/test_logistic.py::test_l1_ratios_param[l1_ratios0]\", \"sklearn/linear_model/tests/test_logistic.py::test_l1_ratios_param[l1_ratios1]\", \"sklearn/linear_model/tests/test_logistic.py::test_l1_ratios_param[None]\", \"sklearn/linear_model/tests/test_logistic.py::test_l1_ratios_param[something_wrong]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.1-0.001]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.1-0.046415888336127795]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.1-2.1544346900318843]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.1-100.0]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.5-0.001]\", 
\"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.5-0.046415888336127795]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.5-2.1544346900318843]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.5-100.0]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.9-0.001]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.9-0.046415888336127795]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.9-2.1544346900318843]\", \"sklearn/linear_model/tests/test_logistic.py::test_elastic_net_versus_sgd[0.9-100.0]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_path_coefs_multinomial\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[liblinear-est0]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[liblinear-est1]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[lbfgs-est0]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[lbfgs-est1]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[newton-cg-est0]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[newton-cg-est1]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[sag-est0]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[sag-est1]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[saga-est0]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_multi_class_auto[saga-est1]\", \"sklearn/linear_model/tests/test_logistic.py::test_logistic_regression_path_deprecation\", \"sklearn/linear_model/tests/test_logistic.py::test_penalty_none[lbfgs]\", \"sklearn/linear_model/tests/test_logistic.py::test_penalty_none[newton-cg]\", \"sklearn/linear_model/tests/test_logistic.py::test_penalty_none[sag]\", \"sklearn/linear_model/tests/test_logistic.py::test_penalty_none[saga]\"]", + "expected_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + }, + "test_file_spans": { + "sklearn/linear_model/tests/test_logistic.py": [ + "test_LogisticRegressionCV_no_refit" + ] + }, + "resolved_by": [ + { + "name": "20240828_autose_mixed", + "updated_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + 
"sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ], + "sklearn/linear_model/tests/test_logistic.py": [] + }, + "alternative_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + }, + "alternative_spans": { + "sklearn/linear_model/logistic.py": [ + "LogisticRegressionCV.fit" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-14092", + "repo": "scikit-learn/scikit-learn", + "base_commit": "df7dd8391148a873d157328a4f0328528a0c4ed9", + "problem_statement": "NCA fails in GridSearch due to too strict parameter checks\nNCA checks its parameters to have a specific type, which can easily fail in a GridSearch due to how param grid is made.\r\n\r\nHere is an example:\r\n```python\r\nimport numpy as np\r\n\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.neighbors import NeighborhoodComponentsAnalysis\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nX = np.random.random_sample((100, 10))\r\ny = np.random.randint(2, size=100)\r\n\r\nnca = NeighborhoodComponentsAnalysis()\r\nknn = KNeighborsClassifier()\r\n\r\npipe = Pipeline([('nca', nca),\r\n ('knn', knn)])\r\n \r\nparams = {'nca__tol': [0.1, 0.5, 1],\r\n 'nca__n_components': np.arange(1, 10)}\r\n \r\ngs = GridSearchCV(estimator=pipe, param_grid=params, error_score='raise')\r\ngs.fit(X,y)\r\n```\r\n\r\nThe issue is that for `tol`: 1 is not a float, and for `n_components`: np.int64 is not int\r\n\r\nBefore proposing a fix for this specific situation, I'd like to have your general opinion about parameter checking. \r\nI like this idea of common parameter checking tool introduced with the NCA PR. What do you think about extending it across the code-base (or at least for new or recent estimators) ?\r\n\r\nCurrently parameter checking is not always done or often partially done, and is quite redundant. For instance, here is the input validation of lda:\r\n```python\r\ndef _check_params(self):\r\n \"\"\"Check model parameters.\"\"\"\r\n if self.n_components <= 0:\r\n raise ValueError(\"Invalid 'n_components' parameter: %r\"\r\n % self.n_components)\r\n\r\n if self.total_samples <= 0:\r\n raise ValueError(\"Invalid 'total_samples' parameter: %r\"\r\n % self.total_samples)\r\n\r\n if self.learning_offset < 0:\r\n raise ValueError(\"Invalid 'learning_offset' parameter: %r\"\r\n % self.learning_offset)\r\n\r\n if self.learning_method not in (\"batch\", \"online\"):\r\n raise ValueError(\"Invalid 'learning_method' parameter: %r\"\r\n % self.learning_method)\r\n```\r\nmost params aren't checked and for those who are there's a lot of duplicated code.\r\n\r\nA propose to be upgrade the new tool to be able to check open/closed intervals (currently only closed) and list membership.\r\n\r\nThe api would be something like that:\r\n```\r\ncheck_param(param, name, valid_options)\r\n```\r\nwhere valid_options would be a dict of `type: constraint`. 
e.g for the `beta_loss` param of `NMF`, it can be either a float or a string in a list, which would give\r\n```\r\nvalid_options = {numbers.Real: None, # None for no constraint\r\n str: ['frobenius', 'kullback-leibler', 'itakura-saito']}\r\n```\r\nSometimes a parameter can only be positive or within a given interval, e.g. `l1_ratio` of `LogisticRegression` must be between 0 and 1, which would give\r\n```\r\nvalid_options = {numbers.Real: Interval(0, 1, closed='both')}\r\n```\r\npositivity of e.g. `max_iter` would be `numbers.Integral: Interval(left=1)`.\n", + "golden_patch": "diff --git a/sklearn/neighbors/nca.py b/sklearn/neighbors/nca.py\n--- a/sklearn/neighbors/nca.py\n+++ b/sklearn/neighbors/nca.py\n@@ -13,6 +13,7 @@\n import numpy as np\n import sys\n import time\n+import numbers\n from scipy.optimize import minimize\n from ..utils.extmath import softmax\n from ..metrics import pairwise_distances\n@@ -299,7 +300,8 @@ def _validate_params(self, X, y):\n \n # Check the preferred dimensionality of the projected space\n if self.n_components is not None:\n- check_scalar(self.n_components, 'n_components', int, 1)\n+ check_scalar(\n+ self.n_components, 'n_components', numbers.Integral, 1)\n \n if self.n_components > X.shape[1]:\n raise ValueError('The preferred dimensionality of the '\n@@ -318,9 +320,9 @@ def _validate_params(self, X, y):\n .format(X.shape[1],\n self.components_.shape[1]))\n \n- check_scalar(self.max_iter, 'max_iter', int, 1)\n- check_scalar(self.tol, 'tol', float, 0.)\n- check_scalar(self.verbose, 'verbose', int, 0)\n+ check_scalar(self.max_iter, 'max_iter', numbers.Integral, 1)\n+ check_scalar(self.tol, 'tol', numbers.Real, 0.)\n+ check_scalar(self.verbose, 'verbose', numbers.Integral, 0)\n \n if self.callback is not None:\n if not callable(self.callback):\n", + "test_patch": "diff --git a/sklearn/neighbors/tests/test_nca.py b/sklearn/neighbors/tests/test_nca.py\n--- a/sklearn/neighbors/tests/test_nca.py\n+++ b/sklearn/neighbors/tests/test_nca.py\n@@ -129,7 +129,7 @@ def test_params_validation():\n # TypeError\n assert_raises(TypeError, NCA(max_iter='21').fit, X, y)\n assert_raises(TypeError, NCA(verbose='true').fit, X, y)\n- assert_raises(TypeError, NCA(tol=1).fit, X, y)\n+ assert_raises(TypeError, NCA(tol='1').fit, X, y)\n assert_raises(TypeError, NCA(n_components='invalid').fit, X, y)\n assert_raises(TypeError, NCA(warm_start=1).fit, X, y)\n \n@@ -518,3 +518,17 @@ def test_convergence_warning():\n assert_warns_message(ConvergenceWarning,\n '[{}] NCA did not converge'.format(cls_name),\n nca.fit, iris_data, iris_target)\n+\n+\n+@pytest.mark.parametrize('param, value', [('n_components', np.int32(3)),\n+ ('max_iter', np.int32(100)),\n+ ('tol', np.float32(0.0001))])\n+def test_parameters_valid_types(param, value):\n+ # check that no error is raised when parameters have numpy integer or\n+ # floating types.\n+ nca = NeighborhoodComponentsAnalysis(**{param: value})\n+\n+ X = iris_data\n+ y = iris_target\n+\n+ nca.fit(X, y)\n", + "fail_to_pass": "[\"sklearn/neighbors/tests/test_nca.py::test_parameters_valid_types[n_components-value0]\", \"sklearn/neighbors/tests/test_nca.py::test_parameters_valid_types[max_iter-value1]\", \"sklearn/neighbors/tests/test_nca.py::test_parameters_valid_types[tol-value2]\"]", + "pass_to_pass": "[\"sklearn/neighbors/tests/test_nca.py::test_simple_example\", \"sklearn/neighbors/tests/test_nca.py::test_toy_example_collapse_points\", \"sklearn/neighbors/tests/test_nca.py::test_finite_differences\", 
\"sklearn/neighbors/tests/test_nca.py::test_params_validation\", \"sklearn/neighbors/tests/test_nca.py::test_transformation_dimensions\", \"sklearn/neighbors/tests/test_nca.py::test_n_components\", \"sklearn/neighbors/tests/test_nca.py::test_init_transformation\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-5-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-7-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[3-11-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-3-3]\", 
\"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-5-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-7-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[5-11-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-5-5]\", 
\"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-5-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-7-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[7-11-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-7-7]\", 
\"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-5-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-7-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-3-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-3-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-3-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-3-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-5-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-5-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-5-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-5-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-7-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-7-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-7-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-7-11]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-11-3]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-11-5]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-11-7]\", \"sklearn/neighbors/tests/test_nca.py::test_auto_init[11-11-11-11]\", \"sklearn/neighbors/tests/test_nca.py::test_warm_start_validation\", \"sklearn/neighbors/tests/test_nca.py::test_warm_start_effectiveness\", \"sklearn/neighbors/tests/test_nca.py::test_verbose[pca]\", \"sklearn/neighbors/tests/test_nca.py::test_verbose[lda]\", \"sklearn/neighbors/tests/test_nca.py::test_verbose[identity]\", \"sklearn/neighbors/tests/test_nca.py::test_verbose[random]\", \"sklearn/neighbors/tests/test_nca.py::test_verbose[precomputed]\", \"sklearn/neighbors/tests/test_nca.py::test_no_verbose\", \"sklearn/neighbors/tests/test_nca.py::test_singleton_class\", \"sklearn/neighbors/tests/test_nca.py::test_one_class\", \"sklearn/neighbors/tests/test_nca.py::test_callback\", \"sklearn/neighbors/tests/test_nca.py::test_expected_transformation_shape\", \"sklearn/neighbors/tests/test_nca.py::test_convergence_warning\"]", + "expected_spans": { + "sklearn/neighbors/nca.py": [ + "imports", + "NeighborhoodComponentsAnalysis._validate_params" + ] + }, + "test_file_spans": { + "sklearn/neighbors/tests/test_nca.py": [ + 
"test_params_validation" + ] + }, + "resolved_by": [ + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/utils/validation.py": [ + "check_scalar" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/utils/validation.py": [ + "check_scalar" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sklearn/neighbors/nca.py": [ + "NeighborhoodComponentsAnalysis._validate_params" + ] + }, + "alternative_spans": { + "sklearn/neighbors/nca.py": [ + "NeighborhoodComponentsAnalysis._validate_params" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/utils/validation.py": [ + "check_scalar" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/utils/validation.py": [ + "check_scalar" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/utils/validation.py": [ + "check_scalar" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/neighbors/nca.py": [ + "NeighborhoodComponentsAnalysis._validate_params" + ], + "sklearn/utils/validation.py": [ + "check_scalar" + ] + }, + "alternative_spans": { + "sklearn/neighbors/nca.py": [ + "NeighborhoodComponentsAnalysis._validate_params" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/neighbors/nca.py": [ + "NeighborhoodComponentsAnalysis._validate_params" + ], + "sklearn/utils/validation.py": [ + "check_scalar" + ] + }, + "alternative_spans": { + "sklearn/neighbors/nca.py": [ + "NeighborhoodComponentsAnalysis._validate_params" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240908_infant_gpt4o", + "spans": { + "sklearn/neighbors/nca.py": [ + "NeighborhoodComponentsAnalysis._validate_params" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "sklearn/neighbors/nca.py": [ + "NeighborhoodComponentsAnalysis._validate_params" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sklearn/neighbors/nca.py": [ + "NeighborhoodComponentsAnalysis._validate_params" + ] + } + } + ] + }, + { + "instance_id": "pytest-dev__pytest-5495", + "repo": "pytest-dev/pytest", + "base_commit": "1aefb24b37c30fba8fd79a744829ca16e252f340", + "problem_statement": "Confusing assertion rewriting message with byte strings\nThe comparison with assertion rewriting for byte strings is confusing: \r\n```\r\n def test_b():\r\n> assert b\"\" == b\"42\"\r\nE AssertionError: assert b'' == b'42'\r\nE Right contains more items, first extra item: 52\r\nE Full diff:\r\nE - b''\r\nE + b'42'\r\nE ? ++\r\n```\r\n\r\n52 is the ASCII ordinal of \"4\" here.\r\n\r\nIt became clear to me when using another example:\r\n\r\n```\r\n def test_b():\r\n> assert b\"\" == b\"1\"\r\nE AssertionError: assert b'' == b'1'\r\nE Right contains more items, first extra item: 49\r\nE Full diff:\r\nE - b''\r\nE + b'1'\r\nE ? 
+\r\n```\r\n\r\nNot sure what should/could be done here.\n", + "golden_patch": "diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py\n--- a/src/_pytest/assertion/util.py\n+++ b/src/_pytest/assertion/util.py\n@@ -254,17 +254,38 @@ def _compare_eq_iterable(left, right, verbose=0):\n \n \n def _compare_eq_sequence(left, right, verbose=0):\n+ comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)\n explanation = []\n len_left = len(left)\n len_right = len(right)\n for i in range(min(len_left, len_right)):\n if left[i] != right[i]:\n+ if comparing_bytes:\n+ # when comparing bytes, we want to see their ascii representation\n+ # instead of their numeric values (#5260)\n+ # using a slice gives us the ascii representation:\n+ # >>> s = b'foo'\n+ # >>> s[0]\n+ # 102\n+ # >>> s[0:1]\n+ # b'f'\n+ left_value = left[i : i + 1]\n+ right_value = right[i : i + 1]\n+ else:\n+ left_value = left[i]\n+ right_value = right[i]\n+\n explanation += [\n- \"At index {} diff: {!r} != {!r}\".format(i, left[i], right[i])\n+ \"At index {} diff: {!r} != {!r}\".format(i, left_value, right_value)\n ]\n break\n- len_diff = len_left - len_right\n \n+ if comparing_bytes:\n+ # when comparing bytes, it doesn't help to show the \"sides contain one or more items\"\n+ # longer explanation, so skip it\n+ return explanation\n+\n+ len_diff = len_left - len_right\n if len_diff:\n if len_diff > 0:\n dir_with_more = \"Left\"\n", + "test_patch": "diff --git a/testing/test_assertion.py b/testing/test_assertion.py\n--- a/testing/test_assertion.py\n+++ b/testing/test_assertion.py\n@@ -331,6 +331,27 @@ def test_multiline_text_diff(self):\n assert \"- spam\" in diff\n assert \"+ eggs\" in diff\n \n+ def test_bytes_diff_normal(self):\n+ \"\"\"Check special handling for bytes diff (#5260)\"\"\"\n+ diff = callequal(b\"spam\", b\"eggs\")\n+\n+ assert diff == [\n+ \"b'spam' == b'eggs'\",\n+ \"At index 0 diff: b's' != b'e'\",\n+ \"Use -v to get the full diff\",\n+ ]\n+\n+ def test_bytes_diff_verbose(self):\n+ \"\"\"Check special handling for bytes diff (#5260)\"\"\"\n+ diff = callequal(b\"spam\", b\"eggs\", verbose=True)\n+ assert diff == [\n+ \"b'spam' == b'eggs'\",\n+ \"At index 0 diff: b's' != b'e'\",\n+ \"Full diff:\",\n+ \"- b'spam'\",\n+ \"+ b'eggs'\",\n+ ]\n+\n def test_list(self):\n expl = callequal([0, 1], [0, 2])\n assert len(expl) > 1\n", + "fail_to_pass": "[\"testing/test_assertion.py::TestAssert_reprcompare::test_bytes_diff_normal\", \"testing/test_assertion.py::TestAssert_reprcompare::test_bytes_diff_verbose\"]", + "pass_to_pass": "[\"testing/test_assertion.py::TestImportHookInstallation::test_register_assert_rewrite_checks_types\", \"testing/test_assertion.py::TestAssert_reprcompare::test_different_types\", \"testing/test_assertion.py::TestAssert_reprcompare::test_summary\", \"testing/test_assertion.py::TestAssert_reprcompare::test_text_diff\", \"testing/test_assertion.py::TestAssert_reprcompare::test_text_skipping\", \"testing/test_assertion.py::TestAssert_reprcompare::test_text_skipping_verbose\", \"testing/test_assertion.py::TestAssert_reprcompare::test_multiline_text_diff\", \"testing/test_assertion.py::TestAssert_reprcompare::test_list\", \"testing/test_assertion.py::TestAssert_reprcompare::test_iterable_full_diff[left0-right0-\\\\n\", \"testing/test_assertion.py::TestAssert_reprcompare::test_iterable_full_diff[left1-right1-\\\\n\", \"testing/test_assertion.py::TestAssert_reprcompare::test_iterable_full_diff[left2-right2-\\\\n\", 
\"testing/test_assertion.py::TestAssert_reprcompare::test_list_different_lengths\", \"testing/test_assertion.py::TestAssert_reprcompare::test_dict\", \"testing/test_assertion.py::TestAssert_reprcompare::test_dict_omitting\", \"testing/test_assertion.py::TestAssert_reprcompare::test_dict_omitting_with_verbosity_1\", \"testing/test_assertion.py::TestAssert_reprcompare::test_dict_omitting_with_verbosity_2\", \"testing/test_assertion.py::TestAssert_reprcompare::test_dict_different_items\", \"testing/test_assertion.py::TestAssert_reprcompare::test_sequence_different_items\", \"testing/test_assertion.py::TestAssert_reprcompare::test_set\", \"testing/test_assertion.py::TestAssert_reprcompare::test_frozenzet\", \"testing/test_assertion.py::TestAssert_reprcompare::test_Sequence\", \"testing/test_assertion.py::TestAssert_reprcompare::test_list_tuples\", \"testing/test_assertion.py::TestAssert_reprcompare::test_repr_verbose\", \"testing/test_assertion.py::TestAssert_reprcompare::test_list_bad_repr\", \"testing/test_assertion.py::TestAssert_reprcompare::test_one_repr_empty\", \"testing/test_assertion.py::TestAssert_reprcompare::test_repr_no_exc\", \"testing/test_assertion.py::TestAssert_reprcompare::test_unicode\", \"testing/test_assertion.py::TestAssert_reprcompare::test_nonascii_text\", \"testing/test_assertion.py::TestAssert_reprcompare::test_format_nonascii_explanation\", \"testing/test_assertion.py::TestAssert_reprcompare::test_mojibake\", \"testing/test_assertion.py::TestAssert_reprcompare_attrsclass::test_comparing_two_different_attrs_classes\", \"testing/test_assertion.py::TestFormatExplanation::test_fmt_simple\", \"testing/test_assertion.py::TestFormatExplanation::test_fmt_where\", \"testing/test_assertion.py::TestFormatExplanation::test_fmt_and\", \"testing/test_assertion.py::TestFormatExplanation::test_fmt_where_nested\", \"testing/test_assertion.py::TestFormatExplanation::test_fmt_newline\", \"testing/test_assertion.py::TestFormatExplanation::test_fmt_newline_escaped\", \"testing/test_assertion.py::TestFormatExplanation::test_fmt_newline_before_where\", \"testing/test_assertion.py::TestFormatExplanation::test_fmt_multi_newline_before_where\", \"testing/test_assertion.py::TestTruncateExplanation::test_doesnt_truncate_when_input_is_empty_list\", \"testing/test_assertion.py::TestTruncateExplanation::test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars\", \"testing/test_assertion.py::TestTruncateExplanation::test_truncates_at_8_lines_when_given_list_of_empty_strings\", \"testing/test_assertion.py::TestTruncateExplanation::test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars\", \"testing/test_assertion.py::TestTruncateExplanation::test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars\", \"testing/test_assertion.py::TestTruncateExplanation::test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars\", \"testing/test_assertion.py::TestTruncateExplanation::test_truncates_at_1_line_when_first_line_is_GT_max_chars\", \"testing/test_assertion.py::test_reprcompare_notin\", \"testing/test_assertion.py::test_reprcompare_whitespaces\", \"testing/test_assertion.py::test_exit_from_assertrepr_compare\", \"testing/test_assertion.py::TestImportHookInstallation::test_conftest_assertion_rewrite[plain-True]\", \"testing/test_assertion.py::TestImportHookInstallation::test_conftest_assertion_rewrite[plain-False]\", \"testing/test_assertion.py::TestImportHookInstallation::test_conftest_assertion_rewrite[rewrite-True]\", 
\"testing/test_assertion.py::TestImportHookInstallation::test_conftest_assertion_rewrite[rewrite-False]\", \"testing/test_assertion.py::TestImportHookInstallation::test_rewrite_assertions_pytester_plugin\", \"testing/test_assertion.py::TestImportHookInstallation::test_pytest_plugins_rewrite[plain]\", \"testing/test_assertion.py::TestImportHookInstallation::test_pytest_plugins_rewrite[rewrite]\", \"testing/test_assertion.py::TestImportHookInstallation::test_pytest_plugins_rewrite_module_names[str]\", \"testing/test_assertion.py::TestImportHookInstallation::test_pytest_plugins_rewrite_module_names[list]\", \"testing/test_assertion.py::TestImportHookInstallation::test_pytest_plugins_rewrite_module_names_correctly\", \"testing/test_assertion.py::TestImportHookInstallation::test_rewrite_ast\", \"testing/test_assertion.py::TestBinReprIntegration::test_pytest_assertrepr_compare_called\", \"testing/test_assertion.py::TestAssert_reprcompare_dataclass::test_dataclasses\", \"testing/test_assertion.py::TestAssert_reprcompare_dataclass::test_dataclasses_verbose\", \"testing/test_assertion.py::TestAssert_reprcompare_dataclass::test_dataclasses_with_attribute_comparison_off\", \"testing/test_assertion.py::TestAssert_reprcompare_dataclass::test_comparing_two_different_data_classes\", \"testing/test_assertion.py::TestFormatExplanation::test_special_chars_full\", \"testing/test_assertion.py::TestTruncateExplanation::test_full_output_truncated\", \"testing/test_assertion.py::test_python25_compile_issue257\", \"testing/test_assertion.py::test_rewritten\", \"testing/test_assertion.py::test_pytest_assertrepr_compare_integration\", \"testing/test_assertion.py::test_sequence_comparison_uses_repr\", \"testing/test_assertion.py::test_assertrepr_loaded_per_dir\", \"testing/test_assertion.py::test_assertion_options\", \"testing/test_assertion.py::test_triple_quoted_string_issue113\", \"testing/test_assertion.py::test_traceback_failure\", \"testing/test_assertion.py::test_exception_handling_no_traceback\", \"testing/test_assertion.py::test_warn_missing\", \"testing/test_assertion.py::test_recursion_source_decode\", \"testing/test_assertion.py::test_AssertionError_message\", \"testing/test_assertion.py::test_diff_newline_at_end\", \"testing/test_assertion.py::test_assert_tuple_warning\", \"testing/test_assertion.py::test_assert_indirect_tuple_no_warning\", \"testing/test_assertion.py::test_assert_with_unicode\", \"testing/test_assertion.py::test_raise_unprintable_assertion_error\", \"testing/test_assertion.py::test_raise_assertion_error_raisin_repr\", \"testing/test_assertion.py::test_issue_1944\"]", + "expected_spans": { + "src/_pytest/assertion/util.py": [ + "_compare_eq_sequence" + ] + }, + "test_file_spans": { + "testing/test_assertion.py": [ + "TestAssert_reprcompare.test_list" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "src/_pytest/assertion/util.py": [ + "_compare_eq_sequence" + ] + }, + "alternative_spans": { + "src/_pytest/assertion/util.py": [ + "_compare_eq_sequence" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "src/_pytest/assertion/util.py": [ + "_compare_eq_sequence" + ] + }, + "alternative_spans": { + "src/_pytest/assertion/util.py": [ + "_compare_eq_sequence" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "src/_pytest/assertion/util.py": [ + "_compare_eq_sequence" + ] + }, + "alternative_spans": { + "src/_pytest/assertion/util.py": [ + "_compare_eq_sequence" + ] + } + } 
+ ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-17139", + "repo": "sympy/sympy", + "base_commit": "1d3327b8e90a186df6972991963a5ae87053259d", + "problem_statement": "simplify(cos(x)**I): Invalid comparison of complex I (fu.py)\n```\r\n>>> from sympy import *\r\n>>> x = Symbol('x')\r\n>>> print(simplify(cos(x)**I))\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/home/e/se/sympy/simplify/simplify.py\", line 587, in simplify\r\n expr = trigsimp(expr, deep=True)\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 508, in trigsimp\r\n return trigsimpfunc(expr)\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 501, in \r\n 'matching': (lambda x: futrig(x)),\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 1101, in futrig\r\n e = bottom_up(e, lambda x: _futrig(x, **kwargs))\r\n File \"/home/e/se/sympy/simplify/simplify.py\", line 1081, in bottom_up\r\n rv = F(rv)\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 1101, in \r\n e = bottom_up(e, lambda x: _futrig(x, **kwargs))\r\n File \"/home/e/se/sympy/simplify/trigsimp.py\", line 1169, in _futrig\r\n e = greedy(tree, objective=Lops)(e)\r\n File \"/home/e/se/sympy/strategies/core.py\", line 115, in minrule\r\n return min([rule(expr) for rule in rules], key=objective)\r\n File \"/home/e/se/sympy/strategies/core.py\", line 115, in \r\n return min([rule(expr) for rule in rules], key=objective)\r\n File \"/home/e/se/sympy/strategies/core.py\", line 44, in chain_rl\r\n expr = rule(expr)\r\n File \"/home/e/se/sympy/simplify/fu.py\", line 566, in TR6\r\n return _TR56(rv, cos, sin, lambda x: 1 - x, max=max, pow=pow)\r\n File \"/home/e/se/sympy/simplify/fu.py\", line 524, in _TR56\r\n return bottom_up(rv, _f)\r\n File \"/home/e/se/sympy/simplify/simplify.py\", line 1081, in bottom_up\r\n rv = F(rv)\r\n File \"/home/e/se/sympy/simplify/fu.py\", line 504, in _f\r\n if (rv.exp < 0) == True:\r\n File \"/home/e/se/sympy/core/expr.py\", line 406, in __lt__\r\n raise TypeError(\"Invalid comparison of complex %s\" % me)\r\nTypeError: Invalid comparison of complex I\r\n```\n", + "golden_patch": "diff --git a/sympy/simplify/fu.py b/sympy/simplify/fu.py\n--- a/sympy/simplify/fu.py\n+++ b/sympy/simplify/fu.py\n@@ -500,6 +500,8 @@ def _f(rv):\n # change is not going to allow a simplification as far as I can tell.\n if not (rv.is_Pow and rv.base.func == f):\n return rv\n+ if not rv.exp.is_real:\n+ return rv\n \n if (rv.exp < 0) == True:\n return rv\n", + "test_patch": "diff --git a/sympy/simplify/tests/test_fu.py b/sympy/simplify/tests/test_fu.py\n--- a/sympy/simplify/tests/test_fu.py\n+++ b/sympy/simplify/tests/test_fu.py\n@@ -76,6 +76,10 @@ def test__TR56():\n assert T(sin(x)**6, sin, cos, h, 6, True) == sin(x)**6\n assert T(sin(x)**8, sin, cos, h, 10, True) == (-cos(x)**2 + 1)**4\n \n+ # issue 17137\n+ assert T(sin(x)**I, sin, cos, h, 4, True) == sin(x)**I\n+ assert T(sin(x)**(2*I + 1), sin, cos, h, 4, True) == sin(x)**(2*I + 1)\n+\n \n def test_TR5():\n assert TR5(sin(x)**2) == -cos(x)**2 + 1\ndiff --git a/sympy/simplify/tests/test_simplify.py b/sympy/simplify/tests/test_simplify.py\n--- a/sympy/simplify/tests/test_simplify.py\n+++ b/sympy/simplify/tests/test_simplify.py\n@@ -811,6 +811,11 @@ def test_issue_15965():\n assert simplify(B) == bnew\n \n \n+def test_issue_17137():\n+ assert simplify(cos(x)**I) == cos(x)**I\n+ assert simplify(cos(x)**(2 + 3*I)) == cos(x)**(2 + 3*I)\n+\n+\n def test_issue_7971():\n z = Integral(x, (x, 1, 1))\n assert z != 0\n", + "fail_to_pass": 
"[\"test__TR56\", \"test_issue_17137\"]", + "pass_to_pass": "[\"test_TR1\", \"test_TR2\", \"test_TR2i\", \"test_TR3\", \"test_TR5\", \"test_TR6\", \"test_TR7\", \"test_TR8\", \"test_TR9\", \"test_TR10\", \"test_TR10i\", \"test_TR11\", \"test_TR12\", \"test_TR13\", \"test_L\", \"test_fu\", \"test_objective\", \"test_process_common_addends\", \"test_trig_split\", \"test_TRmorrie\", \"test_TRpower\", \"test_hyper_as_trig\", \"test_TR12i\", \"test_TR14\", \"test_TR15_16_17\", \"test_issue_7263\", \"test_simplify_expr\", \"test_issue_3557\", \"test_simplify_other\", \"test_simplify_complex\", \"test_simplify_ratio\", \"test_simplify_measure\", \"test_simplify_rational\", \"test_simplify_issue_1308\", \"test_issue_5652\", \"test_simplify_fail1\", \"test_nthroot\", \"test_nthroot1\", \"test_separatevars\", \"test_separatevars_advanced_factor\", \"test_hypersimp\", \"test_nsimplify\", \"test_issue_9448\", \"test_extract_minus_sign\", \"test_diff\", \"test_logcombine_1\", \"test_logcombine_complex_coeff\", \"test_issue_5950\", \"test_posify\", \"test_issue_4194\", \"test_as_content_primitive\", \"test_signsimp\", \"test_besselsimp\", \"test_Piecewise\", \"test_polymorphism\", \"test_issue_from_PR1599\", \"test_issue_6811\", \"test_issue_6920\", \"test_issue_7001\", \"test_inequality_no_auto_simplify\", \"test_issue_9398\", \"test_issue_9324_simplify\", \"test_issue_13474\", \"test_simplify_function_inverse\", \"test_clear_coefficients\", \"test_nc_simplify\", \"test_issue_15965\"]", + "expected_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "test_file_spans": { + "sympy/simplify/tests/test_fu.py": [ + "test__TR56" + ], + "sympy/simplify/tests/test_simplify.py": [ + "test_issue_7971" + ] + }, + "resolved_by": [ + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/simplify/fu.py": [ + "imports", + "TR0", + "TR1", + "TR2", + "TR2i", + "TR3", + "TR4", + "_TR56", + "TR7", + "TR8", + "TR9", + "TR10", + "TR10i", + "TR11", + "TR12", + "TR12i", + "TR13", + "TRmorrie", + "TR14", + "TR15", + "TR16", + "TR111", + "TR22", + "TRpower", + "L", + "impl", + "fu", + "process_common_addends", + "impl:15", + "_roots", + "trig_split", + "as_f_sign_1", + "_osborne", + "_osbornei", + "hyper_as_trig", + "sincos_to_sum" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "imports", + "TR0", + "TR1", + "TR2", + "TR2i", + "TR3", + "TR4", + "_TR56", + "TR7", + "TR8", + "TR9", + "TR10", + "TR10i", + "TR11", + "TR12", + "TR12i", + "TR13", + "TRmorrie", + "TR14", + "TR15", + "TR16", + "TR111", + "TR22", + "TRpower", + "L", + "impl", + "fu", + "process_common_addends", + "impl:15", + "_roots", + "trig_split", + "as_f_sign_1", + "_osborne", + "_osbornei", + "hyper_as_trig", + "sincos_to_sum" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { 
+ "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + }, + "alternative_spans": { + "sympy/simplify/fu.py": [ + "_TR56" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11564", + "repo": "django/django", + "base_commit": "580e644f24f1c5ae5b94784fb73a9953a178fd26", + "problem_statement": "Add support for SCRIPT_NAME in STATIC_URL and MEDIA_URL\nDescription\n\t \n\t\t(last modified by Rostyslav Bryzgunov)\n\t \nBy default, {% static '...' %} tag just appends STATIC_URL in the path. When running on sub-path, using SCRIPT_NAME WSGI param, it results in incorrect static URL - it doesn't prepend SCRIPT_NAME prefix.\nThis problem can be solved with prepending SCRIPT_NAME to STATIC_URL in settings.py but that doesn't work when SCRIPT_NAME is a dynamic value.\nThis can be easily added into default Django static tag and django.contrib.staticfiles tag as following:\ndef render(self, context):\n\turl = self.url(context)\n\t# Updating url here with request.META['SCRIPT_NAME'] \n\tif self.varname is None:\n\t\treturn url\n\tcontext[self.varname] = url\n\t\treturn ''\nOn more research I found that FileSystemStorage and StaticFilesStorage ignores SCRIPT_NAME as well. 
\nWe might have to do a lot of changes but I think it's worth the efforts.\n", + "golden_patch": "diff --git a/django/conf/__init__.py b/django/conf/__init__.py\n--- a/django/conf/__init__.py\n+++ b/django/conf/__init__.py\n@@ -15,7 +15,8 @@\n \n import django\n from django.conf import global_settings\n-from django.core.exceptions import ImproperlyConfigured\n+from django.core.exceptions import ImproperlyConfigured, ValidationError\n+from django.core.validators import URLValidator\n from django.utils.deprecation import RemovedInDjango40Warning\n from django.utils.functional import LazyObject, empty\n \n@@ -109,6 +110,26 @@ def configure(self, default_settings=global_settings, **options):\n setattr(holder, name, value)\n self._wrapped = holder\n \n+ @staticmethod\n+ def _add_script_prefix(value):\n+ \"\"\"\n+ Add SCRIPT_NAME prefix to relative paths.\n+\n+ Useful when the app is being served at a subpath and manually prefixing\n+ subpath to STATIC_URL and MEDIA_URL in settings is inconvenient.\n+ \"\"\"\n+ # Don't apply prefix to valid URLs.\n+ try:\n+ URLValidator()(value)\n+ return value\n+ except (ValidationError, AttributeError):\n+ pass\n+ # Don't apply prefix to absolute paths.\n+ if value.startswith('/'):\n+ return value\n+ from django.urls import get_script_prefix\n+ return '%s%s' % (get_script_prefix(), value)\n+\n @property\n def configured(self):\n \"\"\"Return True if the settings have already been configured.\"\"\"\n@@ -128,6 +149,14 @@ def PASSWORD_RESET_TIMEOUT_DAYS(self):\n )\n return self.__getattr__('PASSWORD_RESET_TIMEOUT_DAYS')\n \n+ @property\n+ def STATIC_URL(self):\n+ return self._add_script_prefix(self.__getattr__('STATIC_URL'))\n+\n+ @property\n+ def MEDIA_URL(self):\n+ return self._add_script_prefix(self.__getattr__('MEDIA_URL'))\n+\n \n class Settings:\n def __init__(self, settings_module):\n", + "test_patch": "diff --git a/tests/file_storage/tests.py b/tests/file_storage/tests.py\n--- a/tests/file_storage/tests.py\n+++ b/tests/file_storage/tests.py\n@@ -521,7 +521,7 @@ def test_setting_changed(self):\n defaults_storage = self.storage_class()\n settings = {\n 'MEDIA_ROOT': 'overridden_media_root',\n- 'MEDIA_URL': 'overridden_media_url/',\n+ 'MEDIA_URL': '/overridden_media_url/',\n 'FILE_UPLOAD_PERMISSIONS': 0o333,\n 'FILE_UPLOAD_DIRECTORY_PERMISSIONS': 0o333,\n }\ndiff --git a/tests/settings_tests/tests.py b/tests/settings_tests/tests.py\n--- a/tests/settings_tests/tests.py\n+++ b/tests/settings_tests/tests.py\n@@ -12,6 +12,7 @@\n override_settings, signals,\n )\n from django.test.utils import requires_tz_support\n+from django.urls import clear_script_prefix, set_script_prefix\n \n \n @modify_settings(ITEMS={\n@@ -567,3 +568,51 @@ def decorated_function():\n signals.setting_changed.disconnect(self.receiver)\n # This call shouldn't raise any errors.\n decorated_function()\n+\n+\n+class MediaURLStaticURLPrefixTest(SimpleTestCase):\n+ def set_script_name(self, val):\n+ clear_script_prefix()\n+ if val is not None:\n+ set_script_prefix(val)\n+\n+ def test_not_prefixed(self):\n+ # Don't add SCRIPT_NAME prefix to valid URLs, absolute paths or None.\n+ tests = (\n+ '/path/',\n+ 'http://myhost.com/path/',\n+ None,\n+ )\n+ for setting in ('MEDIA_URL', 'STATIC_URL'):\n+ for path in tests:\n+ new_settings = {setting: path}\n+ with self.settings(**new_settings):\n+ for script_name in ['/somesubpath', '/somesubpath/', '/', '', None]:\n+ with self.subTest(script_name=script_name, **new_settings):\n+ try:\n+ self.set_script_name(script_name)\n+ self.assertEqual(getattr(settings, 
setting), path)\n+ finally:\n+ clear_script_prefix()\n+\n+ def test_add_script_name_prefix(self):\n+ tests = (\n+ # Relative paths.\n+ ('/somesubpath', 'path/', '/somesubpath/path/'),\n+ ('/somesubpath/', 'path/', '/somesubpath/path/'),\n+ ('/', 'path/', '/path/'),\n+ # Invalid URLs.\n+ ('/somesubpath/', 'htp://myhost.com/path/', '/somesubpath/htp://myhost.com/path/'),\n+ # Blank settings.\n+ ('/somesubpath/', '', '/somesubpath/'),\n+ )\n+ for setting in ('MEDIA_URL', 'STATIC_URL'):\n+ for script_name, path, expected_path in tests:\n+ new_settings = {setting: path}\n+ with self.settings(**new_settings):\n+ with self.subTest(script_name=script_name, **new_settings):\n+ try:\n+ self.set_script_name(script_name)\n+ self.assertEqual(getattr(settings, setting), expected_path)\n+ finally:\n+ clear_script_prefix()\n", + "fail_to_pass": "[\"test_add_script_name_prefix (settings_tests.tests.MediaURLStaticURLPrefixTest)\", \"test_not_prefixed (settings_tests.tests.MediaURLStaticURLPrefixTest)\"]", + "pass_to_pass": "[\"test_max_recursion_error (settings_tests.tests.ClassDecoratedTestCaseSuper)\", \"test_override_settings_inheritance (settings_tests.tests.ChildDecoratedTestCase)\", \"test_method_override (settings_tests.tests.FullyDecoratedTestCase)\", \"test_override (settings_tests.tests.FullyDecoratedTestCase)\", \"test_max_recursion_error (settings_tests.tests.ClassDecoratedTestCase)\", \"test_method_override (settings_tests.tests.ClassDecoratedTestCase)\", \"test_override (settings_tests.tests.ClassDecoratedTestCase)\", \"Settings are overridden within setUpClass (#21281).\", \"Regression test for #9610.\", \"test_first_character_dot (file_storage.tests.FileStoragePathParsing)\", \"test_get_filesystem_storage (file_storage.tests.GetStorageClassTests)\", \"test_get_invalid_storage_module (file_storage.tests.GetStorageClassTests)\", \"test_get_nonexistent_storage_class (file_storage.tests.GetStorageClassTests)\", \"test_get_nonexistent_storage_module (file_storage.tests.GetStorageClassTests)\", \"Receiver fails on both enter and exit.\", \"Receiver fails on enter only.\", \"Receiver fails on exit only.\", \"test_override_settings_reusable_on_enter (settings_tests.tests.OverrideSettingsIsolationOnExceptionTests)\", \"test_configure (settings_tests.tests.IsOverriddenTest)\", \"test_evaluated_lazysettings_repr (settings_tests.tests.IsOverriddenTest)\", \"test_module (settings_tests.tests.IsOverriddenTest)\", \"test_override (settings_tests.tests.IsOverriddenTest)\", \"test_settings_repr (settings_tests.tests.IsOverriddenTest)\", \"test_unevaluated_lazysettings_repr (settings_tests.tests.IsOverriddenTest)\", \"test_usersettingsholder_repr (settings_tests.tests.IsOverriddenTest)\", \"test_content_saving (file_storage.tests.ContentFileStorageTestCase)\", \"test_none (settings_tests.tests.SecureProxySslHeaderTest)\", \"test_set_with_xheader_right (settings_tests.tests.SecureProxySslHeaderTest)\", \"test_set_with_xheader_wrong (settings_tests.tests.SecureProxySslHeaderTest)\", \"test_set_without_xheader (settings_tests.tests.SecureProxySslHeaderTest)\", \"test_xheader_preferred_to_underlying_request (settings_tests.tests.SecureProxySslHeaderTest)\", \"Regression test for #19031\", \"test_already_configured (settings_tests.tests.SettingsTests)\", \"test_class_decorator (settings_tests.tests.SettingsTests)\", \"test_context_manager (settings_tests.tests.SettingsTests)\", \"test_decorator (settings_tests.tests.SettingsTests)\", \"test_incorrect_timezone (settings_tests.tests.SettingsTests)\", 
\"test_no_secret_key (settings_tests.tests.SettingsTests)\", \"test_no_settings_module (settings_tests.tests.SettingsTests)\", \"test_nonupper_settings_ignored_in_default_settings (settings_tests.tests.SettingsTests)\", \"test_nonupper_settings_prohibited_in_configure (settings_tests.tests.SettingsTests)\", \"test_override (settings_tests.tests.SettingsTests)\", \"test_override_change (settings_tests.tests.SettingsTests)\", \"test_override_doesnt_leak (settings_tests.tests.SettingsTests)\", \"test_override_settings_delete (settings_tests.tests.SettingsTests)\", \"test_override_settings_nested (settings_tests.tests.SettingsTests)\", \"test_settings_delete (settings_tests.tests.SettingsTests)\", \"test_settings_delete_wrapped (settings_tests.tests.SettingsTests)\", \"test_signal_callback_context_manager (settings_tests.tests.SettingsTests)\", \"test_signal_callback_decorator (settings_tests.tests.SettingsTests)\", \"test_tuple_settings (settings_tests.tests.TestListSettings)\", \"test_deconstruction (file_storage.tests.FileSystemStorageTests)\", \"test_lazy_base_url_init (file_storage.tests.FileSystemStorageTests)\", \"test_file_upload_default_permissions (file_storage.tests.FileStoragePermissions)\", \"test_file_upload_directory_default_permissions (file_storage.tests.FileStoragePermissions)\", \"test_file_upload_directory_permissions (file_storage.tests.FileStoragePermissions)\", \"test_file_upload_permissions (file_storage.tests.FileStoragePermissions)\", \"test_decorated_testcase_module (settings_tests.tests.FullyDecoratedTranTestCase)\", \"test_decorated_testcase_name (settings_tests.tests.FullyDecoratedTranTestCase)\", \"test_method_list_override (settings_tests.tests.FullyDecoratedTranTestCase)\", \"test_method_list_override_nested_order (settings_tests.tests.FullyDecoratedTranTestCase)\", \"test_method_list_override_no_ops (settings_tests.tests.FullyDecoratedTranTestCase)\", \"test_method_list_override_strings (settings_tests.tests.FullyDecoratedTranTestCase)\", \"test_method_override (settings_tests.tests.FullyDecoratedTranTestCase)\", \"test_override (settings_tests.tests.FullyDecoratedTranTestCase)\", \"test_custom_valid_name_callable_upload_to (file_storage.tests.FileFieldStorageTests)\", \"test_duplicate_filename (file_storage.tests.FileFieldStorageTests)\", \"test_empty_upload_to (file_storage.tests.FileFieldStorageTests)\", \"test_extended_length_storage (file_storage.tests.FileFieldStorageTests)\", \"test_file_object (file_storage.tests.FileFieldStorageTests)\", \"test_file_truncation (file_storage.tests.FileFieldStorageTests)\", \"test_filefield_default (file_storage.tests.FileFieldStorageTests)\", \"test_filefield_pickling (file_storage.tests.FileFieldStorageTests)\", \"test_filefield_read (file_storage.tests.FileFieldStorageTests)\", \"test_filefield_reopen (file_storage.tests.FileFieldStorageTests)\", \"test_filefield_write (file_storage.tests.FileFieldStorageTests)\", \"test_files (file_storage.tests.FileFieldStorageTests)\", \"test_pathlib_upload_to (file_storage.tests.FileFieldStorageTests)\", \"test_random_upload_to (file_storage.tests.FileFieldStorageTests)\", \"test_stringio (file_storage.tests.FileFieldStorageTests)\", \"test_base_url (file_storage.tests.OverwritingStorageTests)\", \"test_delete_deletes_directories (file_storage.tests.OverwritingStorageTests)\", \"test_delete_no_name (file_storage.tests.OverwritingStorageTests)\", \"test_empty_location (file_storage.tests.OverwritingStorageTests)\", \"test_file_access_options 
(file_storage.tests.OverwritingStorageTests)\", \"test_file_chunks_error (file_storage.tests.OverwritingStorageTests)\", \"test_file_get_accessed_time (file_storage.tests.OverwritingStorageTests)\", \"test_file_get_accessed_time_timezone (file_storage.tests.OverwritingStorageTests)\", \"test_file_get_created_time (file_storage.tests.OverwritingStorageTests)\", \"test_file_get_created_time_timezone (file_storage.tests.OverwritingStorageTests)\", \"test_file_get_modified_time (file_storage.tests.OverwritingStorageTests)\", \"test_file_get_modified_time_timezone (file_storage.tests.OverwritingStorageTests)\", \"test_file_path (file_storage.tests.OverwritingStorageTests)\", \"test_file_save_with_path (file_storage.tests.OverwritingStorageTests)\", \"test_file_save_without_name (file_storage.tests.OverwritingStorageTests)\", \"The storage backend should preserve case of filenames.\", \"test_file_storage_prevents_directory_traversal (file_storage.tests.OverwritingStorageTests)\", \"test_file_url (file_storage.tests.OverwritingStorageTests)\", \"test_listdir (file_storage.tests.OverwritingStorageTests)\", \"test_makedirs_race_handling (file_storage.tests.OverwritingStorageTests)\", \"test_remove_race_handling (file_storage.tests.OverwritingStorageTests)\", \"test_save_doesnt_close (file_storage.tests.OverwritingStorageTests)\", \"Saving to same file name twice overwrites the first file.\", \"test_setting_changed (file_storage.tests.OverwritingStorageTests)\", \"test_base_url (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_custom_storage_discarding_empty_content (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_delete_deletes_directories (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_delete_no_name (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_empty_location (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_access_options (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_chunks_error (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_get_accessed_time (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_get_accessed_time_timezone (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_get_created_time (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_get_created_time_timezone (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_get_modified_time (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_get_modified_time_timezone (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_path (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_save_with_path (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_save_without_name (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_storage_prevents_directory_traversal (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_file_url (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_listdir (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_makedirs_race_handling (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_remove_race_handling (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_save_doesnt_close (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_setting_changed (file_storage.tests.DiscardingFalseContentStorageTests)\", \"test_base_url (file_storage.tests.CustomStorageTests)\", 
\"test_custom_get_available_name (file_storage.tests.CustomStorageTests)\", \"test_delete_deletes_directories (file_storage.tests.CustomStorageTests)\", \"test_delete_no_name (file_storage.tests.CustomStorageTests)\", \"test_empty_location (file_storage.tests.CustomStorageTests)\", \"test_file_access_options (file_storage.tests.CustomStorageTests)\", \"test_file_chunks_error (file_storage.tests.CustomStorageTests)\", \"test_file_get_accessed_time (file_storage.tests.CustomStorageTests)\", \"test_file_get_accessed_time_timezone (file_storage.tests.CustomStorageTests)\", \"test_file_get_created_time (file_storage.tests.CustomStorageTests)\", \"test_file_get_created_time_timezone (file_storage.tests.CustomStorageTests)\", \"test_file_get_modified_time (file_storage.tests.CustomStorageTests)\", \"test_file_get_modified_time_timezone (file_storage.tests.CustomStorageTests)\", \"test_file_path (file_storage.tests.CustomStorageTests)\", \"test_file_save_with_path (file_storage.tests.CustomStorageTests)\", \"test_file_save_without_name (file_storage.tests.CustomStorageTests)\", \"test_file_storage_prevents_directory_traversal (file_storage.tests.CustomStorageTests)\", \"test_file_url (file_storage.tests.CustomStorageTests)\", \"test_listdir (file_storage.tests.CustomStorageTests)\", \"test_makedirs_race_handling (file_storage.tests.CustomStorageTests)\", \"test_remove_race_handling (file_storage.tests.CustomStorageTests)\", \"test_save_doesnt_close (file_storage.tests.CustomStorageTests)\", \"test_setting_changed (file_storage.tests.CustomStorageTests)\", \"test_base_url (file_storage.tests.FileStorageTests)\", \"test_delete_deletes_directories (file_storage.tests.FileStorageTests)\", \"test_delete_no_name (file_storage.tests.FileStorageTests)\", \"test_empty_location (file_storage.tests.FileStorageTests)\", \"test_file_access_options (file_storage.tests.FileStorageTests)\", \"test_file_chunks_error (file_storage.tests.FileStorageTests)\", \"test_file_get_accessed_time (file_storage.tests.FileStorageTests)\", \"test_file_get_accessed_time_timezone (file_storage.tests.FileStorageTests)\", \"test_file_get_created_time (file_storage.tests.FileStorageTests)\", \"test_file_get_created_time_timezone (file_storage.tests.FileStorageTests)\", \"test_file_get_modified_time (file_storage.tests.FileStorageTests)\", \"test_file_get_modified_time_timezone (file_storage.tests.FileStorageTests)\", \"test_file_path (file_storage.tests.FileStorageTests)\", \"test_file_save_with_path (file_storage.tests.FileStorageTests)\", \"test_file_save_without_name (file_storage.tests.FileStorageTests)\", \"test_file_storage_prevents_directory_traversal (file_storage.tests.FileStorageTests)\", \"test_file_url (file_storage.tests.FileStorageTests)\", \"test_listdir (file_storage.tests.FileStorageTests)\", \"test_makedirs_race_handling (file_storage.tests.FileStorageTests)\", \"test_remove_race_handling (file_storage.tests.FileStorageTests)\", \"test_save_doesnt_close (file_storage.tests.FileStorageTests)\", \"test_setting_changed (file_storage.tests.FileStorageTests)\", \"test_urllib_request_urlopen (file_storage.tests.FileLikeObjectTestCase)\", \"test_race_condition (file_storage.tests.FileSaveRaceConditionTest)\"]", + "expected_spans": { + "django/conf/__init__.py": [ + "imports", + "LazySettings.configured", + "LazySettings" + ] + }, + "test_file_spans": { + "tests/file_storage/tests.py": [ + "FileStorageTests.test_setting_changed" + ], + "tests/settings_tests/tests.py": [ + "imports" + ] + }, + "resolved_by": [], + 
"alternative_spans": [] + }, + { + "instance_id": "django__django-11583", + "repo": "django/django", + "base_commit": "60dc957a825232fdda9138e2f8878b2ca407a7c9", + "problem_statement": "Auto-reloading with StatReloader very intermittently throws \"ValueError: embedded null byte\".\nDescription\n\t\nRaising this mainly so that it's tracked, as I have no idea how to reproduce it, nor why it's happening. It ultimately looks like a problem with Pathlib, which wasn't used prior to 2.2.\nStacktrace:\nTraceback (most recent call last):\n File \"manage.py\" ...\n\texecute_from_command_line(sys.argv)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/__init__.py\", line 381, in execute_from_command_line\n\tutility.execute()\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/__init__.py\", line 375, in execute\n\tself.fetch_command(subcommand).run_from_argv(self.argv)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/base.py\", line 323, in run_from_argv\n\tself.execute(*args, **cmd_options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/commands/runserver.py\", line 60, in execute\n\tsuper().execute(*args, **options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/base.py\", line 364, in execute\n\toutput = self.handle(*args, **options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/commands/runserver.py\", line 95, in handle\n\tself.run(**options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/core/management/commands/runserver.py\", line 102, in run\n\tautoreload.run_with_reloader(self.inner_run, **options)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 577, in run_with_reloader\n\tstart_django(reloader, main_func, *args, **kwargs)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 562, in start_django\n\treloader.run(django_main_thread)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 280, in run\n\tself.run_loop()\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 286, in run_loop\n\tnext(ticker)\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 326, in tick\n\tfor filepath, mtime in self.snapshot_files():\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 342, in snapshot_files\n\tfor file in self.watched_files():\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 241, in watched_files\n\tyield from iter_all_python_module_files()\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 103, in iter_all_python_module_files\n\treturn iter_modules_and_files(modules, frozenset(_error_files))\n File \"/Userz/kez/path/to/venv/lib/python3.6/site-packages/django/utils/autoreload.py\", line 132, in iter_modules_and_files\n\tresults.add(path.resolve().absolute())\n File \"/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/pathlib.py\", line 1120, in resolve\n\ts = self._flavour.resolve(self, strict=strict)\n File \"/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/pathlib.py\", line 346, in resolve\n\treturn _resolve(base, str(path)) or sep\n File \"/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/pathlib.py\", line 330, in 
_resolve\n\ttarget = accessor.readlink(newpath)\n File \"/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/pathlib.py\", line 441, in readlink\n\treturn os.readlink(path)\nValueError: embedded null byte\nI did print(path) before os.readlink(path) in pathlib and ended up with:\n/Users/kez\n/Users/kez/.pyenv\n/Users/kez/.pyenv/versions\n/Users/kez/.pyenv/versions/3.6.2\n/Users/kez/.pyenv/versions/3.6.2/lib\n/Users/kez/.pyenv/versions/3.6.2/lib/python3.6\n/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/asyncio\n/Users/kez/.pyenv/versions/3.6.2/lib/python3.6/asyncio/selector_events.py\n/Users\nIt always seems to be /Users which is last\nIt may have already printed /Users as part of another .resolve() multiple times (that is, the order is not deterministic, and it may have traversed beyond /Users successfully many times during startup.\nI don't know where to begin looking for the rogue null byte, nor why it only exists sometimes.\nBest guess I have is that there's a mountpoint in /Users to a samba share which may not have been connected to yet? I dunno.\nI have no idea if it's fixable without removing the use of pathlib (which tbh I think should happen anyway, because it's slow) and reverting to using os.path.join and friends. \nI have no idea if it's fixed in a later Python version, but with no easy way to reproduce ... dunno how I'd check.\nI have no idea if it's something specific to my system (pyenv, OSX 10.11, etc)\n", + "golden_patch": "diff --git a/django/utils/autoreload.py b/django/utils/autoreload.py\n--- a/django/utils/autoreload.py\n+++ b/django/utils/autoreload.py\n@@ -143,6 +143,10 @@ def iter_modules_and_files(modules, extra_files):\n # The module could have been removed, don't fail loudly if this\n # is the case.\n continue\n+ except ValueError as e:\n+ # Network filesystems may return null bytes in file paths.\n+ logger.debug('\"%s\" raised when resolving path: \"%s\"' % (str(e), path))\n+ continue\n results.add(resolved_path)\n return frozenset(results)\n \n", + "test_patch": "diff --git a/tests/utils_tests/test_autoreload.py b/tests/utils_tests/test_autoreload.py\n--- a/tests/utils_tests/test_autoreload.py\n+++ b/tests/utils_tests/test_autoreload.py\n@@ -140,6 +140,17 @@ def test_main_module_without_file_is_not_resolved(self):\n fake_main = types.ModuleType('__main__')\n self.assertEqual(autoreload.iter_modules_and_files((fake_main,), frozenset()), frozenset())\n \n+ def test_path_with_embedded_null_bytes(self):\n+ for path in (\n+ 'embedded_null_byte\\x00.py',\n+ 'di\\x00rectory/embedded_null_byte.py',\n+ ):\n+ with self.subTest(path=path):\n+ self.assertEqual(\n+ autoreload.iter_modules_and_files((), frozenset([path])),\n+ frozenset(),\n+ )\n+\n \n class TestCommonRoots(SimpleTestCase):\n def test_common_roots(self):\n", + "fail_to_pass": "[\"test_path_with_embedded_null_bytes (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_paths_are_pathlib_instances (utils_tests.test_autoreload.TestIterModulesAndFiles)\"]", + "pass_to_pass": "[\"test_no_exception (utils_tests.test_autoreload.TestRaiseLastException)\", \"test_raises_custom_exception (utils_tests.test_autoreload.TestRaiseLastException)\", \"test_raises_exception (utils_tests.test_autoreload.TestRaiseLastException)\", \"test_raises_exception_with_context (utils_tests.test_autoreload.TestRaiseLastException)\", \"test_watchman_available (utils_tests.test_autoreload.GetReloaderTests)\", \"test_watchman_unavailable (utils_tests.test_autoreload.GetReloaderTests)\", \"test_common_roots 
(utils_tests.test_autoreload.TestCommonRoots)\", \"test_calls_start_django (utils_tests.test_autoreload.RunWithReloaderTests)\", \"test_calls_sys_exit (utils_tests.test_autoreload.RunWithReloaderTests)\", \"test_swallows_keyboard_interrupt (utils_tests.test_autoreload.RunWithReloaderTests)\", \"test_mutates_error_files (utils_tests.test_autoreload.TestCheckErrors)\", \"test_sys_paths_absolute (utils_tests.test_autoreload.TestSysPathDirectories)\", \"test_sys_paths_directories (utils_tests.test_autoreload.TestSysPathDirectories)\", \"test_sys_paths_non_existing (utils_tests.test_autoreload.TestSysPathDirectories)\", \"test_sys_paths_with_directories (utils_tests.test_autoreload.TestSysPathDirectories)\", \"test_manage_py (utils_tests.test_autoreload.RestartWithReloaderTests)\", \"test_python_m_django (utils_tests.test_autoreload.RestartWithReloaderTests)\", \"test_run_loop_catches_stopiteration (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_run_loop_stop_and_return (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_wait_for_apps_ready_checks_for_exception (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_wait_for_apps_ready_without_exception (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_watch_files_with_recursive_glob (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_watch_with_glob (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_watch_with_single_file (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_watch_without_absolute (utils_tests.test_autoreload.BaseReloaderTests)\", \"test_file (utils_tests.test_autoreload.StatReloaderTests)\", \"test_glob (utils_tests.test_autoreload.StatReloaderTests)\", \"test_glob_recursive (utils_tests.test_autoreload.StatReloaderTests)\", \"test_multiple_globs (utils_tests.test_autoreload.StatReloaderTests)\", \"test_multiple_recursive_globs (utils_tests.test_autoreload.StatReloaderTests)\", \"test_nested_glob_recursive (utils_tests.test_autoreload.StatReloaderTests)\", \"test_overlapping_glob_recursive (utils_tests.test_autoreload.StatReloaderTests)\", \"test_overlapping_globs (utils_tests.test_autoreload.StatReloaderTests)\", \"test_snapshot_files_ignores_missing_files (utils_tests.test_autoreload.StatReloaderTests)\", \"test_snapshot_files_updates (utils_tests.test_autoreload.StatReloaderTests)\", \"test_snapshot_files_with_duplicates (utils_tests.test_autoreload.StatReloaderTests)\", \"test_tick_does_not_trigger_twice (utils_tests.test_autoreload.StatReloaderTests)\", \"test_check_errors_called (utils_tests.test_autoreload.StartDjangoTests)\", \"test_echo_on_called (utils_tests.test_autoreload.StartDjangoTests)\", \"test_starts_thread_with_args (utils_tests.test_autoreload.StartDjangoTests)\", \"test_watchman_becomes_unavailable (utils_tests.test_autoreload.StartDjangoTests)\", \".pyc and .pyo files are included in the files list.\", \"test_check_errors (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_check_errors_catches_all_exceptions (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_file_added (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_main_module_is_resolved (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_main_module_without_file_is_not_resolved (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"test_module_without_spec (utils_tests.test_autoreload.TestIterModulesAndFiles)\", \"iter_all_python_module_file() ignores weakref modules.\", \"test_zip_reload (utils_tests.test_autoreload.TestIterModulesAndFiles)\"]", + 
"expected_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "test_file_spans": { + "tests/utils_tests/test_autoreload.py": [ + "TestIterModulesAndFiles" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/utils/autoreload.py": [ + "imports", + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "imports", + "iter_modules_and_files" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/utils/autoreload.py": [ + "imports", + "check_errors", + "iter_all_python_module_files", + "iter_modules_and_files", + "common_roots", + "sys_path_directories", + "get_child_arguments", + "BaseReloader.__init__", + "BaseReloader.wait_for_apps_ready", + "BaseReloader.run", + "BaseReloader.check_availability", + "BaseReloader.notify_file_changed", + "StatReloader", + "StatReloader.tick", + "StatReloader.snapshot_files", + "WatchmanReloader.__init__", + "WatchmanReloader._watch_root", + "WatchmanReloader._subscribe", + "WatchmanReloader._subscribe_dir", + "WatchmanReloader._watch_glob", + "WatchmanReloader._update_watches", + "WatchmanReloader.update_watches", + "WatchmanReloader._check_subscription", + "WatchmanReloader.check_availability", + "start_django", + "run_with_reloader" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "imports", + "check_errors", + "iter_all_python_module_files", + "iter_modules_and_files", + "common_roots", + "sys_path_directories", + "get_child_arguments", + "BaseReloader.__init__", + "BaseReloader.wait_for_apps_ready", + "BaseReloader.run", + "BaseReloader.check_availability", + "BaseReloader.notify_file_changed", + "StatReloader", + "StatReloader.tick", + "StatReloader.snapshot_files", + "WatchmanReloader.__init__", + "WatchmanReloader._watch_root", + "WatchmanReloader._subscribe", + 
"WatchmanReloader._subscribe_dir", + "WatchmanReloader._watch_glob", + "WatchmanReloader._update_watches", + "WatchmanReloader.update_watches", + "WatchmanReloader._check_subscription", + "WatchmanReloader.check_availability", + "start_django", + "run_with_reloader" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files", + "sys_path_directories" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files", + "sys_path_directories" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/utils/autoreload.py": [ + "imports", + "iter_modules_and_files", + "sys_path_directories", + "StatReloader.snapshot_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "imports", + "iter_modules_and_files", + "sys_path_directories", + "StatReloader.snapshot_files" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "imports", + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "imports", + "iter_modules_and_files" + ] + } + }, + { + "name": 
"20240811_gru", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files", + "StatReloader.snapshot_files" + ] + }, + "alternative_spans": { + "django/utils/autoreload.py": [ + "iter_modules_and_files", + "StatReloader.snapshot_files" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11620", + "repo": "django/django", + "base_commit": "514efa3129792ec2abb2444f3e7aeb3f21a38386", + "problem_statement": "When DEBUG is True, raising Http404 in a path converter's to_python method does not result in a technical response\nDescription\n\t\nThis is the response I get (plain text): \nA server error occurred. Please contact the administrator.\nI understand a ValueError should be raised which tells the URL resolver \"this path does not match, try next one\" but Http404 is what came to my mind intuitively and the error message was not very helpful.\nOne could also make a point that raising a Http404 should be valid way to tell the resolver \"this is indeed the right path but the current parameter value does not match anything so stop what you are doing and let the handler return the 404 page (including a helpful error message when DEBUG is True instead of the default 'Django tried these URL patterns')\".\nThis would prove useful for example to implement a path converter that uses get_object_or_404.\n", + "golden_patch": "diff --git a/django/views/debug.py b/django/views/debug.py\n--- a/django/views/debug.py\n+++ b/django/views/debug.py\n@@ -5,10 +5,10 @@\n from pathlib import Path\n \n from django.conf import settings\n-from django.http import HttpResponse, HttpResponseNotFound\n+from django.http import Http404, HttpResponse, HttpResponseNotFound\n from django.template import Context, Engine, TemplateDoesNotExist\n from django.template.defaultfilters import pprint\n-from django.urls import Resolver404, resolve\n+from django.urls import resolve\n from django.utils import timezone\n from django.utils.datastructures import MultiValueDict\n from django.utils.encoding import force_str\n@@ -483,7 +483,7 @@ def technical_404_response(request, exception):\n caller = ''\n try:\n resolver_match = resolve(request.path)\n- except Resolver404:\n+ except Http404:\n pass\n else:\n obj = resolver_match.func\n", + "test_patch": "diff --git a/tests/view_tests/tests/test_debug.py b/tests/view_tests/tests/test_debug.py\n--- a/tests/view_tests/tests/test_debug.py\n+++ b/tests/view_tests/tests/test_debug.py\n@@ -12,11 +12,13 @@\n from django.core import mail\n from django.core.files.uploadedfile import SimpleUploadedFile\n from django.db import DatabaseError, connection\n+from django.http import Http404\n from django.shortcuts import render\n from django.template import TemplateDoesNotExist\n from django.test import RequestFactory, SimpleTestCase, override_settings\n from django.test.utils import LoggingCaptureMixin\n from django.urls import path, reverse\n+from django.urls.converters import IntConverter\n from django.utils.functional import SimpleLazyObject\n from 
django.utils.safestring import mark_safe\n from django.views.debug import (\n@@ -237,6 +239,11 @@ def test_template_encoding(self):\n technical_404_response(mock.MagicMock(), mock.Mock())\n m.assert_called_once_with(encoding='utf-8')\n \n+ def test_technical_404_converter_raise_404(self):\n+ with mock.patch.object(IntConverter, 'to_python', side_effect=Http404):\n+ response = self.client.get('/path-post/1/')\n+ self.assertContains(response, 'Page not found', status_code=404)\n+\n \n class DebugViewQueriesAllowedTests(SimpleTestCase):\n # May need a query to initialize MySQL connection\n", + "fail_to_pass": "[\"test_technical_404_converter_raise_404 (view_tests.tests.test_debug.DebugViewTests)\"]", + "pass_to_pass": "[\"test_repr (view_tests.tests.test_debug.CallableSettingWrapperTests)\", \"test_cleanse_setting_basic (view_tests.tests.test_debug.HelperFunctionTests)\", \"test_cleanse_setting_ignore_case (view_tests.tests.test_debug.HelperFunctionTests)\", \"test_cleanse_setting_recurses_in_dictionary (view_tests.tests.test_debug.HelperFunctionTests)\", \"test_handle_db_exception (view_tests.tests.test_debug.DebugViewQueriesAllowedTests)\", \"test_400 (view_tests.tests.test_debug.NonDjangoTemplatesDebugViewTests)\", \"test_403 (view_tests.tests.test_debug.NonDjangoTemplatesDebugViewTests)\", \"test_404 (view_tests.tests.test_debug.NonDjangoTemplatesDebugViewTests)\", \"test_template_not_found_error (view_tests.tests.test_debug.NonDjangoTemplatesDebugViewTests)\", \"An exception report can be generated even for a disallowed host.\", \"test_message_only (view_tests.tests.test_debug.PlainTextReportTests)\", \"An exception report can be generated for just a request\", \"An exception report can be generated without request\", \"A simple exception report can be generated\", \"A message can be provided in addition to a request\", \"test_request_with_items_key (view_tests.tests.test_debug.PlainTextReportTests)\", \"test_template_exception (view_tests.tests.test_debug.PlainTextReportTests)\", \"test_ajax_response_encoding (view_tests.tests.test_debug.AjaxResponseExceptionReporterFilter)\", \"test_custom_exception_reporter_filter (view_tests.tests.test_debug.AjaxResponseExceptionReporterFilter)\", \"test_non_sensitive_request (view_tests.tests.test_debug.AjaxResponseExceptionReporterFilter)\", \"test_paranoid_request (view_tests.tests.test_debug.AjaxResponseExceptionReporterFilter)\", \"test_sensitive_request (view_tests.tests.test_debug.AjaxResponseExceptionReporterFilter)\", \"test_400 (view_tests.tests.test_debug.DebugViewTests)\", \"test_403 (view_tests.tests.test_debug.DebugViewTests)\", \"test_403_template (view_tests.tests.test_debug.DebugViewTests)\", \"test_404 (view_tests.tests.test_debug.DebugViewTests)\", \"test_404_empty_path_not_in_urls (view_tests.tests.test_debug.DebugViewTests)\", \"test_404_not_in_urls (view_tests.tests.test_debug.DebugViewTests)\", \"test_classbased_technical_404 (view_tests.tests.test_debug.DebugViewTests)\", \"test_default_urlconf_template (view_tests.tests.test_debug.DebugViewTests)\", \"test_files (view_tests.tests.test_debug.DebugViewTests)\", \"test_no_template_source_loaders (view_tests.tests.test_debug.DebugViewTests)\", \"test_non_l10ned_numeric_ids (view_tests.tests.test_debug.DebugViewTests)\", \"test_regression_21530 (view_tests.tests.test_debug.DebugViewTests)\", \"test_technical_404 (view_tests.tests.test_debug.DebugViewTests)\", \"test_template_encoding (view_tests.tests.test_debug.DebugViewTests)\", \"test_template_exceptions 
(view_tests.tests.test_debug.DebugViewTests)\", \"Tests for not existing file\", \"test_encoding_error (view_tests.tests.test_debug.ExceptionReporterTests)\", \"The ExceptionReporter supports Unix, Windows and Macintosh EOL markers\", \"test_exception_fetching_user (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_ignore_traceback_evaluation_exceptions (view_tests.tests.test_debug.ExceptionReporterTests)\", \"Safe strings in local variables are escaped.\", \"test_message_only (view_tests.tests.test_debug.ExceptionReporterTests)\", \"Non-UTF-8 exceptions/values should not make the output generation choke.\", \"test_reporting_frames_for_cyclic_reference (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_reporting_frames_without_source (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_reporting_of_nested_exceptions (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_request_with_items_key (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_template_encoding (view_tests.tests.test_debug.ExceptionReporterTests)\", \"Large values should not create a large HTML.\", \"test_unfrozen_importlib (view_tests.tests.test_debug.ExceptionReporterTests)\", \"Unprintable values should not make the output generation choke.\", \"test_callable_settings (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_callable_settings_forbidding_to_set_attributes (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_custom_exception_reporter_filter (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_dict_setting_with_non_str_key (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_multivalue_dict_key_error (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_non_sensitive_request (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_paranoid_request (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_function_arguments (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_function_keyword_arguments (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_method (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_request (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_settings (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_settings_with_sensitive_keys (view_tests.tests.test_debug.ExceptionReporterFilterTests)\"]", + "expected_spans": { + "django/views/debug.py": [ + "imports", + "technical_404_response" + ] + }, + "test_file_spans": { + "tests/view_tests/tests/test_debug.py": [ + "imports", + "DebugViewTests" + ] + }, + "resolved_by": [ + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/urls/resolvers.py": [ + "imports", + "RoutePattern.match" + ], + "tests/urlpatterns/converters.py": [ + "imports", + "DynamicConverter", + "DynamicConverter.regex", + "DynamicConverter.regex_1", + "DynamicConverter.to_python", + "DynamicConverter.to_url", + "DynamicConverter.register_to_python" + ], + "tests/urlpatterns/path_dynamic_urls.py": [ + "imports" + ], + "tests/urlpatterns/tests.py": [ + "ConversionExceptionTests", + "ConversionExceptionTests.test_resolve_value_error_means_no_match", + "ConversionExceptionTests.test_resolve_type_error_propagates", + "ConversionExceptionTests.test_reverse_value_error_propagates" + ], + "tests/urlpatterns_reverse/tests.py": [] + }, + "alternative_spans": {} + }, + { + "name": 
"20240604_CodeR", + "updated_spans": { + "django/urls/resolvers.py": [ + "imports", + "RoutePattern.match" + ] + }, + "alternative_spans": {} + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/urls/resolvers.py": [ + "docstring", + "RoutePattern.match" + ] + }, + "alternative_spans": {} + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/urls/resolvers.py": [ + "imports", + "RoutePattern.match" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/urls/resolvers.py": [ + "imports", + "URLResolver.resolve" + ], + "django/views/debug.py": [ + "technical_404_response" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "technical_404_response" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/core/handlers/exception.py": [ + "response_for_exception" + ], + "django/http/response.py": [ + "Http404" + ], + "django/urls/resolvers.py": [ + "imports", + "RoutePattern.match" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/urls/resolvers.py": [ + "imports", + "URLResolver.resolve" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [ + { + "run_name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "django/views/debug.py": [ + "technical_404_response" + ] + } + } + ] + }, + { + "instance_id": "pytest-dev__pytest-5692", + "repo": "pytest-dev/pytest", + "base_commit": "29e336bd9bf87eaef8e2683196ee1975f1ad4088", + "problem_statement": "Hostname and timestamp properties in generated JUnit XML reports\nPytest enables generating JUnit XML reports of the tests.\r\n\r\nHowever, there are some properties missing, specifically `hostname` and `timestamp` from the `testsuite` XML element. 
Is there an option to include them?\r\n\r\nExample of a pytest XML report:\r\n```xml\r\n\r\n\r\n\t\r\n\t\r\n\r\n```\r\n\r\nExample of a junit XML report:\r\n```xml\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n\r\n```\n", + "golden_patch": "diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py\n--- a/src/_pytest/junitxml.py\n+++ b/src/_pytest/junitxml.py\n@@ -10,9 +10,11 @@\n \"\"\"\n import functools\n import os\n+import platform\n import re\n import sys\n import time\n+from datetime import datetime\n \n import py\n \n@@ -666,6 +668,8 @@ def pytest_sessionfinish(self):\n skipped=self.stats[\"skipped\"],\n tests=numtests,\n time=\"%.3f\" % suite_time_delta,\n+ timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(),\n+ hostname=platform.node(),\n )\n logfile.write(Junit.testsuites([suite_node]).unicode(indent=0))\n logfile.close()\n", + "test_patch": "diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py\n--- a/testing/test_junitxml.py\n+++ b/testing/test_junitxml.py\n@@ -1,4 +1,6 @@\n import os\n+import platform\n+from datetime import datetime\n from xml.dom import minidom\n \n import py\n@@ -139,6 +141,30 @@ def test_xpass():\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(name=\"pytest\", errors=1, failures=2, skipped=1, tests=5)\n \n+ def test_hostname_in_xml(self, testdir):\n+ testdir.makepyfile(\n+ \"\"\"\n+ def test_pass():\n+ pass\n+ \"\"\"\n+ )\n+ result, dom = runandparse(testdir)\n+ node = dom.find_first_by_tag(\"testsuite\")\n+ node.assert_attr(hostname=platform.node())\n+\n+ def test_timestamp_in_xml(self, testdir):\n+ testdir.makepyfile(\n+ \"\"\"\n+ def test_pass():\n+ pass\n+ \"\"\"\n+ )\n+ start_time = datetime.now()\n+ result, dom = runandparse(testdir)\n+ node = dom.find_first_by_tag(\"testsuite\")\n+ timestamp = datetime.strptime(node[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.%f\")\n+ assert start_time <= timestamp < datetime.now()\n+\n def test_timing_function(self, testdir):\n testdir.makepyfile(\n \"\"\"\n", + "fail_to_pass": "[\"testing/test_junitxml.py::TestPython::test_hostname_in_xml\", \"testing/test_junitxml.py::TestPython::test_timestamp_in_xml\"]", + "pass_to_pass": "[\"testing/test_junitxml.py::test_mangle_test_address\", \"testing/test_junitxml.py::test_dont_configure_on_slaves\", \"testing/test_junitxml.py::test_invalid_xml_escape\", \"testing/test_junitxml.py::test_logxml_path_expansion\", \"testing/test_junitxml.py::TestPython::test_summing_simple\", \"testing/test_junitxml.py::TestPython::test_summing_simple_with_errors\", \"testing/test_junitxml.py::TestPython::test_timing_function\", \"testing/test_junitxml.py::TestPython::test_junit_duration_report[call]\", \"testing/test_junitxml.py::TestPython::test_junit_duration_report[total]\", \"testing/test_junitxml.py::TestPython::test_setup_error\", \"testing/test_junitxml.py::TestPython::test_teardown_error\", \"testing/test_junitxml.py::TestPython::test_call_failure_teardown_error\", \"testing/test_junitxml.py::TestPython::test_skip_contains_name_reason\", \"testing/test_junitxml.py::TestPython::test_mark_skip_contains_name_reason\", \"testing/test_junitxml.py::TestPython::test_mark_skipif_contains_name_reason\", \"testing/test_junitxml.py::TestPython::test_mark_skip_doesnt_capture_output\", \"testing/test_junitxml.py::TestPython::test_classname_instance\", \"testing/test_junitxml.py::TestPython::test_classname_nested_dir\", \"testing/test_junitxml.py::TestPython::test_internal_error\", \"testing/test_junitxml.py::TestPython::test_failure_function[no]\", 
\"testing/test_junitxml.py::TestPython::test_failure_function[system-out]\", \"testing/test_junitxml.py::TestPython::test_failure_function[system-err]\", \"testing/test_junitxml.py::TestPython::test_failure_verbose_message\", \"testing/test_junitxml.py::TestPython::test_failure_escape\", \"testing/test_junitxml.py::TestPython::test_junit_prefixing\", \"testing/test_junitxml.py::TestPython::test_xfailure_function\", \"testing/test_junitxml.py::TestPython::test_xfailure_marker\", \"testing/test_junitxml.py::TestPython::test_xfail_captures_output_once\", \"testing/test_junitxml.py::TestPython::test_xfailure_xpass\", \"testing/test_junitxml.py::TestPython::test_xfailure_xpass_strict\", \"testing/test_junitxml.py::TestPython::test_collect_error\", \"testing/test_junitxml.py::TestPython::test_unicode\", \"testing/test_junitxml.py::TestPython::test_assertion_binchars\", \"testing/test_junitxml.py::TestPython::test_pass_captures_stdout\", \"testing/test_junitxml.py::TestPython::test_pass_captures_stderr\", \"testing/test_junitxml.py::TestPython::test_setup_error_captures_stdout\", \"testing/test_junitxml.py::TestPython::test_setup_error_captures_stderr\", \"testing/test_junitxml.py::TestPython::test_avoid_double_stdout\", \"testing/test_junitxml.py::TestNonPython::test_summing_simple\", \"testing/test_junitxml.py::test_nullbyte\", \"testing/test_junitxml.py::test_nullbyte_replace\", \"testing/test_junitxml.py::test_logxml_changingdir\", \"testing/test_junitxml.py::test_logxml_makedir\", \"testing/test_junitxml.py::test_logxml_check_isdir\", \"testing/test_junitxml.py::test_escaped_parametrized_names_xml\", \"testing/test_junitxml.py::test_double_colon_split_function_issue469\", \"testing/test_junitxml.py::test_double_colon_split_method_issue469\", \"testing/test_junitxml.py::test_unicode_issue368\", \"testing/test_junitxml.py::test_record_property\", \"testing/test_junitxml.py::test_record_property_same_name\", \"testing/test_junitxml.py::test_record_fixtures_without_junitxml[record_property]\", \"testing/test_junitxml.py::test_record_fixtures_without_junitxml[record_xml_attribute]\", \"testing/test_junitxml.py::test_record_attribute\", \"testing/test_junitxml.py::test_record_fixtures_xunit2[record_xml_attribute]\", \"testing/test_junitxml.py::test_record_fixtures_xunit2[record_property]\", \"testing/test_junitxml.py::test_root_testsuites_tag\", \"testing/test_junitxml.py::test_runs_twice\", \"testing/test_junitxml.py::test_fancy_items_regression\", \"testing/test_junitxml.py::test_global_properties\", \"testing/test_junitxml.py::test_url_property\", \"testing/test_junitxml.py::test_record_testsuite_property\", \"testing/test_junitxml.py::test_record_testsuite_property_junit_disabled\", \"testing/test_junitxml.py::test_record_testsuite_property_type_checking[True]\", \"testing/test_junitxml.py::test_record_testsuite_property_type_checking[False]\", \"testing/test_junitxml.py::test_set_suite_name[my_suite]\", \"testing/test_junitxml.py::test_set_suite_name[]\", \"testing/test_junitxml.py::test_escaped_skipreason_issue3533\", \"testing/test_junitxml.py::test_logging_passing_tests_disabled_does_not_log_test_output\"]", + "expected_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish" + ] + }, + "test_file_spans": { + "testing/test_junitxml.py": [ + "imports", + "TestPython.test_timing_function" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.__init__", + "LogXML.pytest_sessionfinish" 
+ ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.__init__", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "Junit", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "Junit", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML", + "LogXML.__init__", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML", + "LogXML.__init__", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "Junit", + "impl", + "bin_xml_escape", + "impl:16", + "_NodeReporter", + "_NodeReporter.make_properties_node", + "_NodeReporter.record_testreport", + "_NodeReporter.to_xml", + "_NodeReporter.write_captured_output", + "_NodeReporter.append_pass", + "_NodeReporter.append_failure", + "_NodeReporter.append_collect_error", + "_NodeReporter.append_collect_skipped", + "_NodeReporter.append_error", + "_NodeReporter.append_skipped", + "_NodeReporter.finalize", + "_warn_incompatibility_with_xunit2", + "record_property", + "record_xml_attribute", + "_check_record_param_type", + "record_testsuite_property", + "pytest_addoption", + "pytest_configure", + "pytest_unconfigure", + "mangle_test_address", + "LogXML.__init__", + "LogXML.finalize", + "LogXML.node_reporter", + "LogXML.pytest_runtest_logreport", + "LogXML.update_testcase_duration", + "LogXML.pytest_internalerror", + "LogXML.pytest_sessionstart", + "LogXML.pytest_sessionfinish", + "LogXML.pytest_terminal_summary", + "LogXML.add_global_property", + "LogXML._get_global_properties_node" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "Junit", + "impl", + "bin_xml_escape", + "impl:16", + "_NodeReporter", + "_NodeReporter.make_properties_node", + "_NodeReporter.record_testreport", + "_NodeReporter.to_xml", + "_NodeReporter.write_captured_output", + "_NodeReporter.append_pass", + "_NodeReporter.append_failure", + "_NodeReporter.append_collect_error", + "_NodeReporter.append_collect_skipped", + "_NodeReporter.append_error", + "_NodeReporter.append_skipped", + "_NodeReporter.finalize", + "_warn_incompatibility_with_xunit2", + "record_property", + "record_xml_attribute", + "_check_record_param_type", + "record_testsuite_property", + "pytest_addoption", + "pytest_configure", + "pytest_unconfigure", + "mangle_test_address", + "LogXML.__init__", + "LogXML.finalize", + "LogXML.node_reporter", + "LogXML.pytest_runtest_logreport", + 
"LogXML.update_testcase_duration", + "LogXML.pytest_internalerror", + "LogXML.pytest_sessionstart", + "LogXML.pytest_sessionfinish", + "LogXML.pytest_terminal_summary", + "LogXML.add_global_property", + "LogXML._get_global_properties_node" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "docstring", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "docstring", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML", + "LogXML.finalize", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML", + "LogXML.finalize", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML", + "LogXML.finalize", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML", + "LogXML.finalize", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish", + "LogXML.pytest_terminal_summary" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish", + "LogXML.pytest_terminal_summary" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + }, + 
"alternative_spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "Junit", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "Junit", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.finalize", + "LogXML.pytest_sessionfinish" + ], + "testing/test_junitxml.py": [ + "TestPython.test_summing_simple", + "TestPython.test_summing_simple_with_errors", + "TestPython.test_timing_function", + "TestPython.test_junit_duration_report", + "TestPython.test_setup_error", + "TestPython.test_teardown_error", + "TestPython.test_call_failure_teardown_error", + "TestPython.test_skip_contains_name_reason", + "TestPython.test_mark_skip_contains_name_reason", + "TestPython.test_mark_skipif_contains_name_reason", + "TestPython.test_mark_skip_doesnt_capture_output", + "TestPython.test_classname_instance", + "TestPython.test_classname_nested_dir", + "TestPython.test_internal_error", + "TestPython.test_failure_function", + "TestPython.test_failure_verbose_message", + "TestPython.test_failure_escape", + "TestPython.test_junit_prefixing", + "TestPython.test_xfailure_function", + "TestPython.test_xfailure_marker", + "TestPython.test_xfail_captures_output_once", + "TestPython.test_xfailure_xpass", + "TestPython.test_xfailure_xpass_strict", + "TestPython.test_collect_error", + "TestPython.test_unicode", + "TestPython.test_pass_captures_stdout", + "TestPython.test_pass_captures_stderr", + "TestPython.test_setup_error_captures_stdout", + "TestPython.test_setup_error_captures_stderr", + "TestPython.test_avoid_double_stdout", + "test_dont_configure_on_slaves", + "TestNonPython.test_summing_simple", + "test_nullbyte", + "test_nullbyte_replace", + "test_escaped_parametrized_names_xml", + "test_double_colon_split_function_issue469", + "test_double_colon_split_method_issue469", + "test_unicode_issue368", + "test_record_property", + "test_record_property_same_name", + "test_record_fixtures_without_junitxml", + "test_record_attribute" + ] + }, + "alternative_spans": { + "src/_pytest/junitxml.py": [ + "imports", + "LogXML.finalize", + "LogXML.pytest_sessionfinish" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240524_opencsg_starship_gpt4", + "spans": { + "src/_pytest/junitxml.py": [ + "Junit", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240630_agentless_gpt4o", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML", + "LogXML.__init__", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240828_autose_mixed", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240820_honeycomb", + "spans": { + "src/_pytest/junitxml.py": [ + "docstring", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": 
"20240808_RepoGraph_gpt4o", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML", + "LogXML.finalize", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20241016_IBM-SWE-1.0", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML", + "LogXML.finalize", + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240402_sweagent_gpt4", + "spans": { + "src/_pytest/junitxml.py": [ + "LogXML.pytest_sessionfinish" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "src/_pytest/junitxml.py": [ + "Junit", + "LogXML.pytest_sessionfinish" + ] + } + } + ] + }, + { + "instance_id": "django__django-11630", + "repo": "django/django", + "base_commit": "65e86948b80262574058a94ccaae3a9b59c3faea", + "problem_statement": "Django throws error when different apps with different models have the same name table name.\nDescription\n\t\nError message:\ntable_name: (models.E028) db_table 'table_name' is used by multiple models: base.ModelName, app2.ModelName.\nWe have a Base app that points to a central database and that has its own tables. We then have multiple Apps that talk to their own databases. Some share the same table names.\nWe have used this setup for a while, but after upgrading to Django 2.2 we're getting an error saying we're not allowed 2 apps, with 2 different models to have the same table names. \nIs this correct behavior? We've had to roll back to Django 2.0 for now.\n", + "golden_patch": "diff --git a/django/core/checks/model_checks.py b/django/core/checks/model_checks.py\n--- a/django/core/checks/model_checks.py\n+++ b/django/core/checks/model_checks.py\n@@ -4,7 +4,8 @@\n from itertools import chain\n \n from django.apps import apps\n-from django.core.checks import Error, Tags, register\n+from django.conf import settings\n+from django.core.checks import Error, Tags, Warning, register\n \n \n @register(Tags.models)\n@@ -35,14 +36,25 @@ def check_all_models(app_configs=None, **kwargs):\n indexes[model_index.name].append(model._meta.label)\n for model_constraint in model._meta.constraints:\n constraints[model_constraint.name].append(model._meta.label)\n+ if settings.DATABASE_ROUTERS:\n+ error_class, error_id = Warning, 'models.W035'\n+ error_hint = (\n+ 'You have configured settings.DATABASE_ROUTERS. 
Verify that %s '\n+ 'are correctly routed to separate databases.'\n+ )\n+ else:\n+ error_class, error_id = Error, 'models.E028'\n+ error_hint = None\n for db_table, model_labels in db_table_models.items():\n if len(model_labels) != 1:\n+ model_labels_str = ', '.join(model_labels)\n errors.append(\n- Error(\n+ error_class(\n \"db_table '%s' is used by multiple models: %s.\"\n- % (db_table, ', '.join(db_table_models[db_table])),\n+ % (db_table, model_labels_str),\n obj=db_table,\n- id='models.E028',\n+ hint=(error_hint % model_labels_str) if error_hint else None,\n+ id=error_id,\n )\n )\n for index_name, model_labels in indexes.items():\n", + "test_patch": "diff --git a/tests/check_framework/test_model_checks.py b/tests/check_framework/test_model_checks.py\n--- a/tests/check_framework/test_model_checks.py\n+++ b/tests/check_framework/test_model_checks.py\n@@ -1,12 +1,16 @@\n from django.core import checks\n-from django.core.checks import Error\n+from django.core.checks import Error, Warning\n from django.db import models\n from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature\n from django.test.utils import (\n- isolate_apps, modify_settings, override_system_checks,\n+ isolate_apps, modify_settings, override_settings, override_system_checks,\n )\n \n \n+class EmptyRouter:\n+ pass\n+\n+\n @isolate_apps('check_framework', attr_name='apps')\n @override_system_checks([checks.model_checks.check_all_models])\n class DuplicateDBTableTests(SimpleTestCase):\n@@ -28,6 +32,30 @@ class Meta:\n )\n ])\n \n+ @override_settings(DATABASE_ROUTERS=['check_framework.test_model_checks.EmptyRouter'])\n+ def test_collision_in_same_app_database_routers_installed(self):\n+ class Model1(models.Model):\n+ class Meta:\n+ db_table = 'test_table'\n+\n+ class Model2(models.Model):\n+ class Meta:\n+ db_table = 'test_table'\n+\n+ self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [\n+ Warning(\n+ \"db_table 'test_table' is used by multiple models: \"\n+ \"check_framework.Model1, check_framework.Model2.\",\n+ hint=(\n+ 'You have configured settings.DATABASE_ROUTERS. Verify '\n+ 'that check_framework.Model1, check_framework.Model2 are '\n+ 'correctly routed to separate databases.'\n+ ),\n+ obj='test_table',\n+ id='models.W035',\n+ )\n+ ])\n+\n @modify_settings(INSTALLED_APPS={'append': 'basic'})\n @isolate_apps('basic', 'check_framework', kwarg_name='apps')\n def test_collision_across_apps(self, apps):\n@@ -50,6 +78,34 @@ class Meta:\n )\n ])\n \n+ @modify_settings(INSTALLED_APPS={'append': 'basic'})\n+ @override_settings(DATABASE_ROUTERS=['check_framework.test_model_checks.EmptyRouter'])\n+ @isolate_apps('basic', 'check_framework', kwarg_name='apps')\n+ def test_collision_across_apps_database_routers_installed(self, apps):\n+ class Model1(models.Model):\n+ class Meta:\n+ app_label = 'basic'\n+ db_table = 'test_table'\n+\n+ class Model2(models.Model):\n+ class Meta:\n+ app_label = 'check_framework'\n+ db_table = 'test_table'\n+\n+ self.assertEqual(checks.run_checks(app_configs=apps.get_app_configs()), [\n+ Warning(\n+ \"db_table 'test_table' is used by multiple models: \"\n+ \"basic.Model1, check_framework.Model2.\",\n+ hint=(\n+ 'You have configured settings.DATABASE_ROUTERS. 
Verify '\n+ 'that basic.Model1, check_framework.Model2 are correctly '\n+ 'routed to separate databases.'\n+ ),\n+ obj='test_table',\n+ id='models.W035',\n+ )\n+ ])\n+\n def test_no_collision_for_unmanaged_models(self):\n class Unmanaged(models.Model):\n class Meta:\n", + "fail_to_pass": "[\"test_collision_across_apps_database_routers_installed (check_framework.test_model_checks.DuplicateDBTableTests)\", \"test_collision_in_same_app_database_routers_installed (check_framework.test_model_checks.DuplicateDBTableTests)\"]", + "pass_to_pass": "[\"test_collision_abstract_model (check_framework.test_model_checks.IndexNameTests)\", \"test_collision_across_apps (check_framework.test_model_checks.IndexNameTests)\", \"test_collision_in_different_models (check_framework.test_model_checks.IndexNameTests)\", \"test_collision_in_same_model (check_framework.test_model_checks.IndexNameTests)\", \"test_no_collision_abstract_model_interpolation (check_framework.test_model_checks.IndexNameTests)\", \"test_no_collision_across_apps_interpolation (check_framework.test_model_checks.IndexNameTests)\", \"test_collision_abstract_model (check_framework.test_model_checks.ConstraintNameTests)\", \"test_collision_across_apps (check_framework.test_model_checks.ConstraintNameTests)\", \"test_collision_in_different_models (check_framework.test_model_checks.ConstraintNameTests)\", \"test_collision_in_same_model (check_framework.test_model_checks.ConstraintNameTests)\", \"test_no_collision_abstract_model_interpolation (check_framework.test_model_checks.ConstraintNameTests)\", \"test_no_collision_across_apps_interpolation (check_framework.test_model_checks.ConstraintNameTests)\", \"test_collision_across_apps (check_framework.test_model_checks.DuplicateDBTableTests)\", \"test_collision_in_same_app (check_framework.test_model_checks.DuplicateDBTableTests)\", \"test_no_collision_for_proxy_models (check_framework.test_model_checks.DuplicateDBTableTests)\", \"test_no_collision_for_unmanaged_models (check_framework.test_model_checks.DuplicateDBTableTests)\"]", + "expected_spans": { + "django/core/checks/model_checks.py": [ + "imports", + "check_all_models" + ] + }, + "test_file_spans": { + "tests/check_framework/test_model_checks.py": [ + "imports", + "DuplicateDBTableTests.test_collision_across_apps", + "DuplicateDBTableTests.test_no_collision_for_unmanaged_models" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11742", + "repo": "django/django", + "base_commit": "fee75d2aed4e58ada6567c464cfd22e89dc65f4a", + "problem_statement": "Add check to ensure max_length fits longest choice.\nDescription\n\t\nThere is currently no check to ensure that Field.max_length is large enough to fit the longest value in Field.choices.\nThis would be very helpful as often this mistake is not noticed until an attempt is made to save a record with those values that are too long.\n", + "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -257,6 +257,7 @@ def is_value(value, accept_promise=True):\n )\n ]\n \n+ choice_max_length = 0\n # Expect [group_name, [value, display]]\n for choices_group in self.choices:\n try:\n@@ -270,16 +271,32 @@ def is_value(value, accept_promise=True):\n for value, human_name in group_choices\n ):\n break\n+ if self.max_length is not None and group_choices:\n+ choice_max_length = max(\n+ choice_max_length,\n+ *(len(value) for value, _ in 
group_choices if isinstance(value, str)),\n+ )\n except (TypeError, ValueError):\n # No groups, choices in the form [value, display]\n value, human_name = group_name, group_choices\n if not is_value(value) or not is_value(human_name):\n break\n+ if self.max_length is not None and isinstance(value, str):\n+ choice_max_length = max(choice_max_length, len(value))\n \n # Special case: choices=['ab']\n if isinstance(choices_group, str):\n break\n else:\n+ if self.max_length is not None and choice_max_length > self.max_length:\n+ return [\n+ checks.Error(\n+ \"'max_length' is too small to fit the longest value \"\n+ \"in 'choices' (%d characters).\" % choice_max_length,\n+ obj=self,\n+ id='fields.E009',\n+ ),\n+ ]\n return []\n \n return [\n", + "test_patch": "diff --git a/tests/invalid_models_tests/test_ordinary_fields.py b/tests/invalid_models_tests/test_ordinary_fields.py\n--- a/tests/invalid_models_tests/test_ordinary_fields.py\n+++ b/tests/invalid_models_tests/test_ordinary_fields.py\n@@ -304,6 +304,32 @@ class Model(models.Model):\n \n self.assertEqual(Model._meta.get_field('field').check(), [])\n \n+ def test_choices_in_max_length(self):\n+ class Model(models.Model):\n+ field = models.CharField(\n+ max_length=2, choices=[\n+ ('ABC', 'Value Too Long!'), ('OK', 'Good')\n+ ],\n+ )\n+ group = models.CharField(\n+ max_length=2, choices=[\n+ ('Nested', [('OK', 'Good'), ('Longer', 'Longer')]),\n+ ('Grouped', [('Bad', 'Bad')]),\n+ ],\n+ )\n+\n+ for name, choice_max_length in (('field', 3), ('group', 6)):\n+ with self.subTest(name):\n+ field = Model._meta.get_field(name)\n+ self.assertEqual(field.check(), [\n+ Error(\n+ \"'max_length' is too small to fit the longest value \"\n+ \"in 'choices' (%d characters).\" % choice_max_length,\n+ obj=field,\n+ id='fields.E009',\n+ ),\n+ ])\n+\n def test_bad_db_index_value(self):\n class Model(models.Model):\n field = models.CharField(max_length=10, db_index='bad')\n", + "fail_to_pass": "[\"test_choices_in_max_length (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_choices_named_group (invalid_models_tests.test_ordinary_fields.CharFieldTests)\"]", + "pass_to_pass": "[\"test_non_nullable_blank (invalid_models_tests.test_ordinary_fields.GenericIPAddressFieldTests)\", \"test_forbidden_files_and_folders (invalid_models_tests.test_ordinary_fields.FilePathFieldTests)\", \"test_max_length_warning (invalid_models_tests.test_ordinary_fields.IntegerFieldTests)\", \"test_primary_key (invalid_models_tests.test_ordinary_fields.FileFieldTests)\", \"test_upload_to_callable_not_checked (invalid_models_tests.test_ordinary_fields.FileFieldTests)\", \"test_upload_to_starts_with_slash (invalid_models_tests.test_ordinary_fields.FileFieldTests)\", \"test_valid_case (invalid_models_tests.test_ordinary_fields.FileFieldTests)\", \"test_valid_default_case (invalid_models_tests.test_ordinary_fields.FileFieldTests)\", \"test_str_default_value (invalid_models_tests.test_ordinary_fields.BinaryFieldTests)\", \"test_valid_default_value (invalid_models_tests.test_ordinary_fields.BinaryFieldTests)\", \"test_max_length_warning (invalid_models_tests.test_ordinary_fields.AutoFieldTests)\", \"test_primary_key (invalid_models_tests.test_ordinary_fields.AutoFieldTests)\", \"test_valid_case (invalid_models_tests.test_ordinary_fields.AutoFieldTests)\", \"test_fix_default_value (invalid_models_tests.test_ordinary_fields.DateTimeFieldTests)\", \"test_fix_default_value_tz (invalid_models_tests.test_ordinary_fields.DateTimeFieldTests)\", \"test_auto_now_and_auto_now_add_raise_error 
(invalid_models_tests.test_ordinary_fields.DateFieldTests)\", \"test_fix_default_value (invalid_models_tests.test_ordinary_fields.DateFieldTests)\", \"test_fix_default_value_tz (invalid_models_tests.test_ordinary_fields.DateFieldTests)\", \"test_fix_default_value (invalid_models_tests.test_ordinary_fields.TimeFieldTests)\", \"test_fix_default_value_tz (invalid_models_tests.test_ordinary_fields.TimeFieldTests)\", \"test_bad_values_of_max_digits_and_decimal_places (invalid_models_tests.test_ordinary_fields.DecimalFieldTests)\", \"test_decimal_places_greater_than_max_digits (invalid_models_tests.test_ordinary_fields.DecimalFieldTests)\", \"test_negative_max_digits_and_decimal_places (invalid_models_tests.test_ordinary_fields.DecimalFieldTests)\", \"test_required_attributes (invalid_models_tests.test_ordinary_fields.DecimalFieldTests)\", \"test_valid_field (invalid_models_tests.test_ordinary_fields.DecimalFieldTests)\", \"test_bad_db_index_value (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_bad_max_length_value (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_bad_validators (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_choices_containing_lazy (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_choices_containing_non_pairs (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_choices_named_group_bad_structure (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_choices_named_group_lazy (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_choices_named_group_non_pairs (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_iterable_of_iterable_choices (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_lazy_choices (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_missing_max_length (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_negative_max_length (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_non_iterable_choices (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"Two letters isn't a valid choice pair.\", \"test_str_max_length_type (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_str_max_length_value (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_valid_field (invalid_models_tests.test_ordinary_fields.CharFieldTests)\", \"test_pillow_installed (invalid_models_tests.test_ordinary_fields.ImageFieldTests)\"]", + "expected_spans": { + "django/db/models/fields/__init__.py": [ + "Field._check_choices" + ] + }, + "test_file_spans": { + "tests/invalid_models_tests/test_ordinary_fields.py": [ + "CharFieldTests.test_bad_db_index_value" + ] + }, + "resolved_by": [ + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.check", + "Field._check_db_index" + ], + "tests/model_fields/test_charfield.py": [] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.check", + "Field._check_db_index" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "django/db/models/fields/__init__.py": [ + "Field.check", + "Field._check_db_index" + ] + } + } + ] + }, + { + "instance_id": "scikit-learn__scikit-learn-14894", + "repo": "scikit-learn/scikit-learn", + "base_commit": "fdbaa58acbead5a254f2e6d597dc1ab3b947f4c6", + "problem_statement": "ZeroDivisionError in _sparse_fit for SVM with empty 
support_vectors_\n#### Description\r\nWhen using sparse data, in the case where the support_vectors_ attribute is be empty, _fit_sparse gives a ZeroDivisionError\r\n\r\n#### Steps/Code to Reproduce\r\n```\r\nimport numpy as np\r\nimport scipy\r\nimport sklearn\r\nfrom sklearn.svm import SVR\r\nx_train = np.array([[0, 1, 0, 0],\r\n[0, 0, 0, 1],\r\n[0, 0, 1, 0],\r\n[0, 0, 0, 1]])\r\ny_train = np.array([0.04, 0.04, 0.10, 0.16])\r\nmodel = SVR(C=316.227766017, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,\r\n \t gamma=1.0, kernel='linear', max_iter=15000,\r\n \t shrinking=True, tol=0.001, verbose=False)\r\n# dense x_train has no error\r\nmodel.fit(x_train, y_train)\r\n\r\n# convert to sparse\r\nxtrain= scipy.sparse.csr_matrix(x_train)\r\nmodel.fit(xtrain, y_train)\r\n\r\n```\r\n#### Expected Results\r\nNo error is thrown and `self.dual_coef_ = sp.csr_matrix([])`\r\n\r\n#### Actual Results\r\n```\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/usr/local/lib/python3.5/dist-packages/sklearn/svm/base.py\", line 209, in fit\r\n fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)\r\n File \"/usr/local/lib/python3.5/dist-packages/sklearn/svm/base.py\", line 302, in _sparse_fit\r\n dual_coef_indices.size / n_class)\r\nZeroDivisionError: float division by zero\r\n```\r\n\r\n#### Versions\r\n```\r\n>>> sklearn.show_versions() \r\n\r\nSystem:\r\nexecutable: /usr/bin/python3\r\n python: 3.5.2 (default, Nov 12 2018, 13:43:14) [GCC 5.4.0 20160609]\r\n machine: Linux-4.15.0-58-generic-x86_64-with-Ubuntu-16.04-xenial\r\n\r\nPython deps:\r\n numpy: 1.17.0\r\n Cython: None\r\n pip: 19.2.1\r\n pandas: 0.22.0\r\n sklearn: 0.21.3\r\n scipy: 1.3.0\r\nsetuptools: 40.4.3\r\n```\n", + "golden_patch": "diff --git a/sklearn/svm/base.py b/sklearn/svm/base.py\n--- a/sklearn/svm/base.py\n+++ b/sklearn/svm/base.py\n@@ -287,11 +287,14 @@ def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,\n n_SV = self.support_vectors_.shape[0]\n \n dual_coef_indices = np.tile(np.arange(n_SV), n_class)\n- dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n- dual_coef_indices.size / n_class)\n- self.dual_coef_ = sp.csr_matrix(\n- (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n- (n_class, n_SV))\n+ if not n_SV:\n+ self.dual_coef_ = sp.csr_matrix([])\n+ else:\n+ dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,\n+ dual_coef_indices.size / n_class)\n+ self.dual_coef_ = sp.csr_matrix(\n+ (dual_coef_data, dual_coef_indices, dual_coef_indptr),\n+ (n_class, n_SV))\n \n def predict(self, X):\n \"\"\"Perform regression on samples in X.\n", + "test_patch": "diff --git a/sklearn/svm/tests/test_svm.py b/sklearn/svm/tests/test_svm.py\n--- a/sklearn/svm/tests/test_svm.py\n+++ b/sklearn/svm/tests/test_svm.py\n@@ -690,6 +690,19 @@ def test_sparse_precomputed():\n assert \"Sparse precomputed\" in str(e)\n \n \n+def test_sparse_fit_support_vectors_empty():\n+ # Regression test for #14893\n+ X_train = sparse.csr_matrix([[0, 1, 0, 0],\n+ [0, 0, 0, 1],\n+ [0, 0, 1, 0],\n+ [0, 0, 0, 1]])\n+ y_train = np.array([0.04, 0.04, 0.10, 0.16])\n+ model = svm.SVR(kernel='linear')\n+ model.fit(X_train, y_train)\n+ assert not model.support_vectors_.data.size\n+ assert not model.dual_coef_.data.size\n+\n+\n def test_linearsvc_parameters():\n # Test possible parameter combinations in LinearSVC\n # Generate list of possible parameter combinations\n", + "fail_to_pass": "[\"sklearn/svm/tests/test_svm.py::test_sparse_fit_support_vectors_empty\"]", + "pass_to_pass": 
"[\"sklearn/svm/tests/test_svm.py::test_libsvm_parameters\", \"sklearn/svm/tests/test_svm.py::test_libsvm_iris\", \"sklearn/svm/tests/test_svm.py::test_precomputed\", \"sklearn/svm/tests/test_svm.py::test_svr\", \"sklearn/svm/tests/test_svm.py::test_linearsvr\", \"sklearn/svm/tests/test_svm.py::test_linearsvr_fit_sampleweight\", \"sklearn/svm/tests/test_svm.py::test_svr_errors\", \"sklearn/svm/tests/test_svm.py::test_oneclass\", \"sklearn/svm/tests/test_svm.py::test_oneclass_decision_function\", \"sklearn/svm/tests/test_svm.py::test_oneclass_score_samples\", \"sklearn/svm/tests/test_svm.py::test_tweak_params\", \"sklearn/svm/tests/test_svm.py::test_probability\", \"sklearn/svm/tests/test_svm.py::test_decision_function\", \"sklearn/svm/tests/test_svm.py::test_decision_function_shape\", \"sklearn/svm/tests/test_svm.py::test_svr_predict\", \"sklearn/svm/tests/test_svm.py::test_weight\", \"sklearn/svm/tests/test_svm.py::test_svm_classifier_sided_sample_weight[estimator0]\", \"sklearn/svm/tests/test_svm.py::test_svm_classifier_sided_sample_weight[estimator1]\", \"sklearn/svm/tests/test_svm.py::test_svm_regressor_sided_sample_weight[estimator0]\", \"sklearn/svm/tests/test_svm.py::test_svm_regressor_sided_sample_weight[estimator1]\", \"sklearn/svm/tests/test_svm.py::test_svm_equivalence_sample_weight_C\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-zero-SVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-zero-NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-zero-SVR]\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-zero-NuSVR]\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-zero-OneClassSVM]\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-negative-SVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-negative-NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-negative-SVR]\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-negative-NuSVR]\", \"sklearn/svm/tests/test_svm.py::test_negative_sample_weights_mask_all_samples[weights-are-negative-OneClassSVM]\", \"sklearn/svm/tests/test_svm.py::test_negative_weights_svc_leave_just_one_label[mask-label-1-SVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weights_svc_leave_just_one_label[mask-label-1-NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weights_svc_leave_just_one_label[mask-label-2-SVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weights_svc_leave_just_one_label[mask-label-2-NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weights_svc_leave_two_labels[partial-mask-label-1-SVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weights_svc_leave_two_labels[partial-mask-label-1-NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weights_svc_leave_two_labels[partial-mask-label-2-SVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weights_svc_leave_two_labels[partial-mask-label-2-NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weight_equal_coeffs[partial-mask-label-1-SVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weight_equal_coeffs[partial-mask-label-1-NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weight_equal_coeffs[partial-mask-label-1-NuSVR]\", 
\"sklearn/svm/tests/test_svm.py::test_negative_weight_equal_coeffs[partial-mask-label-2-SVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weight_equal_coeffs[partial-mask-label-2-NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_negative_weight_equal_coeffs[partial-mask-label-2-NuSVR]\", \"sklearn/svm/tests/test_svm.py::test_auto_weight\", \"sklearn/svm/tests/test_svm.py::test_bad_input\", \"sklearn/svm/tests/test_svm.py::test_svm_gamma_error[SVC-data0]\", \"sklearn/svm/tests/test_svm.py::test_svm_gamma_error[NuSVC-data1]\", \"sklearn/svm/tests/test_svm.py::test_svm_gamma_error[SVR-data2]\", \"sklearn/svm/tests/test_svm.py::test_svm_gamma_error[NuSVR-data3]\", \"sklearn/svm/tests/test_svm.py::test_svm_gamma_error[OneClassSVM-data4]\", \"sklearn/svm/tests/test_svm.py::test_unicode_kernel\", \"sklearn/svm/tests/test_svm.py::test_sparse_precomputed\", \"sklearn/svm/tests/test_svm.py::test_linearsvc_parameters\", \"sklearn/svm/tests/test_svm.py::test_linearsvx_loss_penalty_deprecations\", \"sklearn/svm/tests/test_svm.py::test_linear_svx_uppercase_loss_penality_raises_error\", \"sklearn/svm/tests/test_svm.py::test_linearsvc\", \"sklearn/svm/tests/test_svm.py::test_linearsvc_crammer_singer\", \"sklearn/svm/tests/test_svm.py::test_linearsvc_fit_sampleweight\", \"sklearn/svm/tests/test_svm.py::test_crammer_singer_binary\", \"sklearn/svm/tests/test_svm.py::test_linearsvc_iris\", \"sklearn/svm/tests/test_svm.py::test_dense_liblinear_intercept_handling\", \"sklearn/svm/tests/test_svm.py::test_liblinear_set_coef\", \"sklearn/svm/tests/test_svm.py::test_immutable_coef_property\", \"sklearn/svm/tests/test_svm.py::test_linearsvc_verbose\", \"sklearn/svm/tests/test_svm.py::test_svc_clone_with_callable_kernel\", \"sklearn/svm/tests/test_svm.py::test_svc_bad_kernel\", \"sklearn/svm/tests/test_svm.py::test_timeout\", \"sklearn/svm/tests/test_svm.py::test_unfitted\", \"sklearn/svm/tests/test_svm.py::test_consistent_proba\", \"sklearn/svm/tests/test_svm.py::test_linear_svm_convergence_warnings\", \"sklearn/svm/tests/test_svm.py::test_svr_coef_sign\", \"sklearn/svm/tests/test_svm.py::test_linear_svc_intercept_scaling\", \"sklearn/svm/tests/test_svm.py::test_lsvc_intercept_scaling_zero\", \"sklearn/svm/tests/test_svm.py::test_hasattr_predict_proba\", \"sklearn/svm/tests/test_svm.py::test_decision_function_shape_two_class\", \"sklearn/svm/tests/test_svm.py::test_ovr_decision_function\", \"sklearn/svm/tests/test_svm.py::test_svc_invalid_break_ties_param[SVC]\", \"sklearn/svm/tests/test_svm.py::test_svc_invalid_break_ties_param[NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_svc_ovr_tie_breaking[SVC]\", \"sklearn/svm/tests/test_svm.py::test_svc_ovr_tie_breaking[NuSVC]\", \"sklearn/svm/tests/test_svm.py::test_gamma_auto\", \"sklearn/svm/tests/test_svm.py::test_gamma_scale\", \"sklearn/svm/tests/test_svm.py::test_n_support_oneclass_svr\"]", + "expected_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "test_file_spans": { + "sklearn/svm/tests/test_svm.py": [ + "test_linearsvc_parameters" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + 
"updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sklearn/svm/base.py": [ + "imports", + "_one_vs_one_coef", + "BaseLibSVM", + "BaseLibSVM.__init__", + "BaseLibSVM._pairwise", + "BaseLibSVM.fit", + "BaseLibSVM._validate_targets", + "BaseLibSVM._warn_from_fit_status", + "BaseLibSVM._dense_fit", + "BaseLibSVM._sparse_fit", + "BaseLibSVM._dense_predict", + "BaseLibSVM._sparse_predict", + "BaseLibSVM._compute_kernel", + "BaseLibSVM._decision_function", + "BaseLibSVM._dense_decision_function", + "BaseLibSVM._sparse_decision_function", + "BaseLibSVM._validate_for_predict", + "BaseLibSVM.coef_", + "BaseLibSVM.n_support_", + "BaseSVC", + "BaseSVC.__init__", + "BaseSVC._validate_targets", + "BaseSVC.predict", + "BaseSVC._check_proba", + "BaseSVC._predict_proba", + "BaseSVC._dense_predict_proba", + "BaseSVC._sparse_predict_proba", + "BaseSVC._get_coef", + "_get_liblinear_solver_type", + "_fit_liblinear" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "imports", + "_one_vs_one_coef", + "BaseLibSVM", + "BaseLibSVM.__init__", + "BaseLibSVM._pairwise", + "BaseLibSVM.fit", + "BaseLibSVM._validate_targets", + "BaseLibSVM._warn_from_fit_status", + "BaseLibSVM._dense_fit", + "BaseLibSVM._sparse_fit", + "BaseLibSVM._dense_predict", + "BaseLibSVM._sparse_predict", + "BaseLibSVM._compute_kernel", + "BaseLibSVM._decision_function", + "BaseLibSVM._dense_decision_function", + "BaseLibSVM._sparse_decision_function", + "BaseLibSVM._validate_for_predict", + "BaseLibSVM.coef_", + "BaseLibSVM.n_support_", + "BaseSVC", + "BaseSVC.__init__", + "BaseSVC._validate_targets", + "BaseSVC.predict", + "BaseSVC._check_proba", + "BaseSVC._predict_proba", + "BaseSVC._dense_predict_proba", + "BaseSVC._sparse_predict_proba", + "BaseSVC._get_coef", + "_get_liblinear_solver_type", + "_fit_liblinear" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { 
+ "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": 
[ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit", + "BaseLibSVM.predict" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit", + "BaseLibSVM.predict" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + }, + "alternative_spans": { + "sklearn/svm/base.py": [ + "BaseLibSVM._sparse_fit" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-14983", + "repo": "scikit-learn/scikit-learn", + "base_commit": "06632c0d185128a53c57ccc73b25b6408e90bb89", + "problem_statement": "RepeatedKFold and RepeatedStratifiedKFold do not show correct __repr__ string\n#### Description\r\n\r\n`RepeatedKFold` and `RepeatedStratifiedKFold` do not show correct \\_\\_repr\\_\\_ string.\r\n\r\n#### Steps/Code to Reproduce\r\n\r\n```python\r\n>>> from sklearn.model_selection import RepeatedKFold, RepeatedStratifiedKFold\r\n>>> repr(RepeatedKFold())\r\n>>> repr(RepeatedStratifiedKFold())\r\n```\r\n\r\n#### Expected Results\r\n\r\n```python\r\n>>> repr(RepeatedKFold())\r\nRepeatedKFold(n_splits=5, n_repeats=10, random_state=None)\r\n>>> repr(RepeatedStratifiedKFold())\r\nRepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=None)\r\n```\r\n\r\n#### Actual Results\r\n\r\n```python\r\n>>> repr(RepeatedKFold())\r\n''\r\n>>> repr(RepeatedStratifiedKFold())\r\n''\r\n```\r\n\r\n#### Versions\r\n```\r\nSystem:\r\n python: 3.7.4 (default, Aug 9 2019, 18:34:13) [MSC v.1915 64 bit (AMD64)]\r\nexecutable: D:\\anaconda3\\envs\\xyz\\python.exe\r\n machine: Windows-10-10.0.16299-SP0\r\n\r\nBLAS:\r\n macros:\r\n lib_dirs:\r\ncblas_libs: cblas\r\n\r\nPython deps:\r\n pip: 19.2.2\r\nsetuptools: 41.0.1\r\n sklearn: 0.21.2\r\n numpy: 1.16.4\r\n scipy: 1.3.1\r\n Cython: None\r\n pandas: 0.24.2\r\n```\n", + "golden_patch": "diff --git a/sklearn/model_selection/_split.py b/sklearn/model_selection/_split.py\n--- a/sklearn/model_selection/_split.py\n+++ b/sklearn/model_selection/_split.py\n@@ -1163,6 +1163,9 @@ def get_n_splits(self, X=None, y=None, groups=None):\n **self.cvargs)\n return cv.get_n_splits(X, y, groups) * self.n_repeats\n \n+ def __repr__(self):\n+ return _build_repr(self)\n+\n \n class RepeatedKFold(_RepeatedSplits):\n \"\"\"Repeated K-Fold cross validator.\n@@ -2158,6 +2161,8 @@ def _build_repr(self):\n try:\n with warnings.catch_warnings(record=True) as w:\n value = getattr(self, key, None)\n+ if value is None and hasattr(self, 'cvargs'):\n+ value = self.cvargs.get(key, None)\n if len(w) and w[0].category == DeprecationWarning:\n # if the parameter is deprecated, don't show it\n continue\n", + "test_patch": "diff --git a/sklearn/model_selection/tests/test_split.py b/sklearn/model_selection/tests/test_split.py\n--- a/sklearn/model_selection/tests/test_split.py\n+++ b/sklearn/model_selection/tests/test_split.py\n@@ -980,6 +980,17 @@ def test_repeated_cv_value_errors():\n assert_raises(ValueError, cv, 
n_repeats=1.5)\n \n \n+@pytest.mark.parametrize(\n+ \"RepeatedCV\", [RepeatedKFold, RepeatedStratifiedKFold]\n+)\n+def test_repeated_cv_repr(RepeatedCV):\n+ n_splits, n_repeats = 2, 6\n+ repeated_cv = RepeatedCV(n_splits=n_splits, n_repeats=n_repeats)\n+ repeated_cv_repr = ('{}(n_repeats=6, n_splits=2, random_state=None)'\n+ .format(repeated_cv.__class__.__name__))\n+ assert repeated_cv_repr == repr(repeated_cv)\n+\n+\n def test_repeated_kfold_determinstic_split():\n X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]\n random_state = 258173307\n", + "fail_to_pass": "[\"sklearn/model_selection/tests/test_split.py::test_repeated_cv_repr[RepeatedKFold]\", \"sklearn/model_selection/tests/test_split.py::test_repeated_cv_repr[RepeatedStratifiedKFold]\"]", + "pass_to_pass": "[\"sklearn/model_selection/tests/test_split.py::test_cross_validator_with_default_params\", \"sklearn/model_selection/tests/test_split.py::test_2d_y\", \"sklearn/model_selection/tests/test_split.py::test_kfold_valueerrors\", \"sklearn/model_selection/tests/test_split.py::test_kfold_indices\", \"sklearn/model_selection/tests/test_split.py::test_kfold_no_shuffle\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_no_shuffle\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[4-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[4-True]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[5-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[5-True]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[6-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[6-True]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[7-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[7-True]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[8-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[8-True]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[9-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[9-True]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[10-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_ratios[10-True]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_label_invariance[4-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_label_invariance[4-True]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_label_invariance[6-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_label_invariance[6-True]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_label_invariance[7-False]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_kfold_label_invariance[7-True]\", \"sklearn/model_selection/tests/test_split.py::test_kfold_balance\", \"sklearn/model_selection/tests/test_split.py::test_stratifiedkfold_balance\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_kfold\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_kfold_stratifiedkfold_reproducibility\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_stratifiedkfold\", \"sklearn/model_selection/tests/test_split.py::test_kfold_can_detect_dependent_samples_on_digits\", 
\"sklearn/model_selection/tests/test_split.py::test_shuffle_split\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_split_default_test_size[None-9-1-ShuffleSplit]\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_split_default_test_size[None-9-1-StratifiedShuffleSplit]\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_split_default_test_size[8-8-2-ShuffleSplit]\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_split_default_test_size[8-8-2-StratifiedShuffleSplit]\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_split_default_test_size[0.8-8-2-ShuffleSplit]\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_split_default_test_size[0.8-8-2-StratifiedShuffleSplit]\", \"sklearn/model_selection/tests/test_split.py::test_group_shuffle_split_default_test_size[None-8-2]\", \"sklearn/model_selection/tests/test_split.py::test_group_shuffle_split_default_test_size[7-7-3]\", \"sklearn/model_selection/tests/test_split.py::test_group_shuffle_split_default_test_size[0.7-7-3]\", \"sklearn/model_selection/tests/test_split.py::test_stratified_shuffle_split_init\", \"sklearn/model_selection/tests/test_split.py::test_stratified_shuffle_split_respects_test_size\", \"sklearn/model_selection/tests/test_split.py::test_stratified_shuffle_split_iter\", \"sklearn/model_selection/tests/test_split.py::test_stratified_shuffle_split_even\", \"sklearn/model_selection/tests/test_split.py::test_stratified_shuffle_split_overlap_train_test_bug\", \"sklearn/model_selection/tests/test_split.py::test_stratified_shuffle_split_multilabel\", \"sklearn/model_selection/tests/test_split.py::test_stratified_shuffle_split_multilabel_many_labels\", \"sklearn/model_selection/tests/test_split.py::test_predefinedsplit_with_kfold_split\", \"sklearn/model_selection/tests/test_split.py::test_group_shuffle_split\", \"sklearn/model_selection/tests/test_split.py::test_leave_one_p_group_out\", \"sklearn/model_selection/tests/test_split.py::test_leave_group_out_changing_groups\", \"sklearn/model_selection/tests/test_split.py::test_leave_one_p_group_out_error_on_fewer_number_of_groups\", \"sklearn/model_selection/tests/test_split.py::test_repeated_cv_value_errors\", \"sklearn/model_selection/tests/test_split.py::test_repeated_kfold_determinstic_split\", \"sklearn/model_selection/tests/test_split.py::test_get_n_splits_for_repeated_kfold\", \"sklearn/model_selection/tests/test_split.py::test_get_n_splits_for_repeated_stratified_kfold\", \"sklearn/model_selection/tests/test_split.py::test_repeated_stratified_kfold_determinstic_split\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_errors\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes1[1.2-0.8]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes1[1.0-0.8]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes1[0.0-0.8]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes1[-0.2-0.8]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes1[0.8-1.2]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes1[0.8-1.0]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes1[0.8-0.0]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes1[0.8--0.2]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes2[-10-0.8]\", 
\"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes2[0-0.8]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes2[11-0.8]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes2[0.8--10]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes2[0.8-0]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_invalid_sizes2[0.8-11]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_default_test_size[None-7-3]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_default_test_size[8-8-2]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_default_test_size[0.8-8-2]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_pandas\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_sparse\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_mock_pandas\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_list_input\", \"sklearn/model_selection/tests/test_split.py::test_shufflesplit_errors[2.0-None]\", \"sklearn/model_selection/tests/test_split.py::test_shufflesplit_errors[1.0-None]\", \"sklearn/model_selection/tests/test_split.py::test_shufflesplit_errors[0.1-0.95]\", \"sklearn/model_selection/tests/test_split.py::test_shufflesplit_errors[None-train_size3]\", \"sklearn/model_selection/tests/test_split.py::test_shufflesplit_errors[11-None]\", \"sklearn/model_selection/tests/test_split.py::test_shufflesplit_errors[10-None]\", \"sklearn/model_selection/tests/test_split.py::test_shufflesplit_errors[8-3]\", \"sklearn/model_selection/tests/test_split.py::test_shufflesplit_reproducible\", \"sklearn/model_selection/tests/test_split.py::test_stratifiedshufflesplit_list_input\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_allow_nans\", \"sklearn/model_selection/tests/test_split.py::test_check_cv\", \"sklearn/model_selection/tests/test_split.py::test_cv_iterable_wrapper\", \"sklearn/model_selection/tests/test_split.py::test_group_kfold\", \"sklearn/model_selection/tests/test_split.py::test_time_series_cv\", \"sklearn/model_selection/tests/test_split.py::test_time_series_max_train_size\", \"sklearn/model_selection/tests/test_split.py::test_nested_cv\", \"sklearn/model_selection/tests/test_split.py::test_build_repr\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_split_empty_trainset[ShuffleSplit]\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_split_empty_trainset[GroupShuffleSplit]\", \"sklearn/model_selection/tests/test_split.py::test_shuffle_split_empty_trainset[StratifiedShuffleSplit]\", \"sklearn/model_selection/tests/test_split.py::test_train_test_split_empty_trainset\", \"sklearn/model_selection/tests/test_split.py::test_leave_one_out_empty_trainset\", \"sklearn/model_selection/tests/test_split.py::test_leave_p_out_empty_trainset\"]", + "expected_spans": { + "sklearn/model_selection/_split.py": [ + "_RepeatedSplits", + "_build_repr" + ] + }, + "test_file_spans": { + "sklearn/model_selection/tests/test_split.py": [ + "test_repeated_kfold_determinstic_split" + ] + }, + "resolved_by": [ + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sklearn/model_selection/_split.py": [ + "_RepeatedSplits", + "_RepeatedSplits.__init__" + ] + }, + "alternative_spans": { + 
"sklearn/model_selection/_split.py": [ + "_RepeatedSplits", + "_RepeatedSplits.__init__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/model_selection/_split.py": [ + "RepeatedKFold", + "RepeatedStratifiedKFold" + ], + "sklearn/model_selection/tests/test_split.py": [ + "test_repeated_stratified_kfold_determinstic_split", + "test_train_test_split_allow_nans" + ] + }, + "alternative_spans": { + "sklearn/model_selection/_split.py": [ + "RepeatedKFold", + "RepeatedStratifiedKFold" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "sklearn/model_selection/_split.py": [ + "_RepeatedSplits", + "_RepeatedSplits.__init__" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sklearn/model_selection/_split.py": [ + "RepeatedKFold", + "RepeatedStratifiedKFold" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-17630", + "repo": "sympy/sympy", + "base_commit": "58e78209c8577b9890e957b624466e5beed7eb08", + "problem_statement": "Exception when multiplying BlockMatrix containing ZeroMatrix blocks\nWhen a block matrix with zero blocks is defined\r\n\r\n```\r\n>>> from sympy import *\r\n>>> a = MatrixSymbol(\"a\", 2, 2)\r\n>>> z = ZeroMatrix(2, 2)\r\n>>> b = BlockMatrix([[a, z], [z, z]])\r\n```\r\n\r\nthen block-multiplying it once seems to work fine:\r\n\r\n```\r\n>>> block_collapse(b * b)\r\nMatrix([\r\n[a**2, 0],\r\n[0, 0]])\r\n>>> b._blockmul(b)\r\nMatrix([\r\n[a**2, 0],\r\n[0, 0]])\r\n```\r\n\r\nbut block-multiplying twice throws an exception:\r\n\r\n```\r\n>>> block_collapse(b * b * b)\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 297, in block_collapse\r\n result = rule(expr)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 11, in exhaustive_rl\r\n new, old = rule(expr), expr\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 44, in chain_rl\r\n expr = rule(expr)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 11, in exhaustive_rl\r\n new, old = rule(expr), expr\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 33, in conditioned_rl\r\n return rule(expr)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/strategies/core.py\", line 95, in switch_rl\r\n return rl(expr)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 361, in bc_matmul\r\n matrices[i] = A._blockmul(B)\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 91, in _blockmul\r\n self.colblocksizes == other.rowblocksizes):\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 80, in colblocksizes\r\n return [self.blocks[0, i].cols for i in range(self.blockshape[1])]\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 80, in \r\n return [self.blocks[0, i].cols for i in range(self.blockshape[1])]\r\nAttributeError: 'Zero' object has no attribute 'cols'\r\n>>> b._blockmul(b)._blockmul(b)\r\nTraceback (most recent call last):\r\n File \"\", 
line 1, in \r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 91, in _blockmul\r\n self.colblocksizes == other.rowblocksizes):\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 80, in colblocksizes\r\n return [self.blocks[0, i].cols for i in range(self.blockshape[1])]\r\n File \"/home/jan/.pyenv/versions/3.7.4/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 80, in \r\n return [self.blocks[0, i].cols for i in range(self.blockshape[1])]\r\nAttributeError: 'Zero' object has no attribute 'cols'\r\n```\r\n\r\nThis seems to be caused by the fact that the zeros in `b._blockmul(b)` are not `ZeroMatrix` but `Zero`:\r\n\r\n```\r\n>>> type(b._blockmul(b).blocks[0, 1])\r\n\r\n```\r\n\r\nHowever, I don't understand SymPy internals well enough to find out why this happens. I use Python 3.7.4 and sympy 1.4 (installed with pip).\n", + "golden_patch": "diff --git a/sympy/matrices/expressions/matexpr.py b/sympy/matrices/expressions/matexpr.py\n--- a/sympy/matrices/expressions/matexpr.py\n+++ b/sympy/matrices/expressions/matexpr.py\n@@ -627,6 +627,8 @@ def _postprocessor(expr):\n # manipulate them like non-commutative scalars.\n return cls._from_args(nonmatrices + [mat_class(*matrices).doit(deep=False)])\n \n+ if mat_class == MatAdd:\n+ return mat_class(*matrices).doit(deep=False)\n return mat_class(cls._from_args(nonmatrices), *matrices).doit(deep=False)\n return _postprocessor\n \n", + "test_patch": "diff --git a/sympy/matrices/expressions/tests/test_blockmatrix.py b/sympy/matrices/expressions/tests/test_blockmatrix.py\n--- a/sympy/matrices/expressions/tests/test_blockmatrix.py\n+++ b/sympy/matrices/expressions/tests/test_blockmatrix.py\n@@ -3,7 +3,7 @@\n BlockMatrix, bc_dist, bc_matadd, bc_transpose, bc_inverse,\n blockcut, reblock_2x2, deblock)\n from sympy.matrices.expressions import (MatrixSymbol, Identity,\n- Inverse, trace, Transpose, det)\n+ Inverse, trace, Transpose, det, ZeroMatrix)\n from sympy.matrices import (\n Matrix, ImmutableMatrix, ImmutableSparseMatrix)\n from sympy.core import Tuple, symbols, Expr\n@@ -104,6 +104,13 @@ def test_block_collapse_explicit_matrices():\n A = ImmutableSparseMatrix([[1, 2], [3, 4]])\n assert block_collapse(BlockMatrix([[A]])) == A\n \n+def test_issue_17624():\n+ a = MatrixSymbol(\"a\", 2, 2)\n+ z = ZeroMatrix(2, 2)\n+ b = BlockMatrix([[a, z], [z, z]])\n+ assert block_collapse(b * b) == BlockMatrix([[a**2, z], [z, z]])\n+ assert block_collapse(b * b * b) == BlockMatrix([[a**3, z], [z, z]])\n+\n def test_BlockMatrix_trace():\n A, B, C, D = [MatrixSymbol(s, 3, 3) for s in 'ABCD']\n X = BlockMatrix([[A, B], [C, D]])\ndiff --git a/sympy/matrices/expressions/tests/test_matadd.py b/sympy/matrices/expressions/tests/test_matadd.py\n--- a/sympy/matrices/expressions/tests/test_matadd.py\n+++ b/sympy/matrices/expressions/tests/test_matadd.py\n@@ -1,7 +1,8 @@\n from sympy.matrices.expressions import MatrixSymbol, MatAdd, MatPow, MatMul\n-from sympy.matrices.expressions.matexpr import GenericZeroMatrix\n+from sympy.matrices.expressions.matexpr import GenericZeroMatrix, ZeroMatrix\n from sympy.matrices import eye, ImmutableMatrix\n-from sympy.core import Basic, S\n+from sympy.core import Add, Basic, S\n+from sympy.utilities.pytest import XFAIL, raises\n \n X = MatrixSymbol('X', 2, 2)\n Y = MatrixSymbol('Y', 2, 2)\n@@ -30,3 +31,11 @@ def test_doit_args():\n def test_generic_identity():\n assert 
MatAdd.identity == GenericZeroMatrix()\n assert MatAdd.identity != S.Zero\n+\n+\n+def test_zero_matrix_add():\n+ assert Add(ZeroMatrix(2, 2), ZeroMatrix(2, 2)) == ZeroMatrix(2, 2)\n+\n+@XFAIL\n+def test_matrix_add_with_scalar():\n+ raises(TypeError, lambda: Add(0, ZeroMatrix(2, 2)))\n", + "fail_to_pass": "[\"test_issue_17624\", \"test_zero_matrix_add\"]", + "pass_to_pass": "[\"test_bc_matmul\", \"test_bc_matadd\", \"test_bc_transpose\", \"test_bc_dist_diag\", \"test_block_plus_ident\", \"test_BlockMatrix\", \"test_block_collapse_explicit_matrices\", \"test_BlockMatrix_trace\", \"test_BlockMatrix_Determinant\", \"test_squareBlockMatrix\", \"test_BlockDiagMatrix\", \"test_blockcut\", \"test_reblock_2x2\", \"test_deblock\", \"test_sort_key\", \"test_matadd_sympify\", \"test_matadd_of_matrices\", \"test_doit_args\", \"test_generic_identity\"]", + "expected_spans": { + "sympy/matrices/expressions/matexpr.py": [ + "get_postprocessor" + ] + }, + "test_file_spans": { + "sympy/matrices/expressions/tests/test_blockmatrix.py": [ + "imports", + "test_BlockMatrix_trace" + ], + "sympy/matrices/expressions/tests/test_matadd.py": [ + "imports" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11797", + "repo": "django/django", + "base_commit": "3346b78a8a872286a245d1e77ef4718fc5e6be1a", + "problem_statement": "Filtering on query result overrides GROUP BY of internal query\nDescription\n\t\nfrom django.contrib.auth import models\na = models.User.objects.filter(email__isnull=True).values('email').annotate(m=Max('id')).values('m')\nprint(a.query) # good\n# SELECT MAX(\"auth_user\".\"id\") AS \"m\" FROM \"auth_user\" WHERE \"auth_user\".\"email\" IS NULL GROUP BY \"auth_user\".\"email\"\nprint(a[:1].query) # good\n# SELECT MAX(\"auth_user\".\"id\") AS \"m\" FROM \"auth_user\" WHERE \"auth_user\".\"email\" IS NULL GROUP BY \"auth_user\".\"email\" LIMIT 1\nb = models.User.objects.filter(id=a[:1])\nprint(b.query) # GROUP BY U0.\"id\" should be GROUP BY U0.\"email\"\n# SELECT ... 
FROM \"auth_user\" WHERE \"auth_user\".\"id\" = (SELECT U0.\"id\" FROM \"auth_user\" U0 WHERE U0.\"email\" IS NULL GROUP BY U0.\"id\" LIMIT 1)\n", + "golden_patch": "diff --git a/django/db/models/lookups.py b/django/db/models/lookups.py\n--- a/django/db/models/lookups.py\n+++ b/django/db/models/lookups.py\n@@ -262,9 +262,9 @@ def process_rhs(self, compiler, connection):\n from django.db.models.sql.query import Query\n if isinstance(self.rhs, Query):\n if self.rhs.has_limit_one():\n- # The subquery must select only the pk.\n- self.rhs.clear_select_clause()\n- self.rhs.add_fields(['pk'])\n+ if not self.rhs.has_select_fields:\n+ self.rhs.clear_select_clause()\n+ self.rhs.add_fields(['pk'])\n else:\n raise ValueError(\n 'The QuerySet value for an exact lookup must be limited to '\n", + "test_patch": "diff --git a/tests/lookup/tests.py b/tests/lookup/tests.py\n--- a/tests/lookup/tests.py\n+++ b/tests/lookup/tests.py\n@@ -5,6 +5,7 @@\n \n from django.core.exceptions import FieldError\n from django.db import connection\n+from django.db.models import Max\n from django.db.models.expressions import Exists, OuterRef\n from django.db.models.functions import Substr\n from django.test import TestCase, skipUnlessDBFeature\n@@ -956,3 +957,15 @@ def test_nested_outerref_lhs(self):\n ),\n )\n self.assertEqual(qs.get(has_author_alias_match=True), tag)\n+\n+ def test_exact_query_rhs_with_selected_columns(self):\n+ newest_author = Author.objects.create(name='Author 2')\n+ authors_max_ids = Author.objects.filter(\n+ name='Author 2',\n+ ).values(\n+ 'name',\n+ ).annotate(\n+ max_id=Max('id'),\n+ ).values('max_id')\n+ authors = Author.objects.filter(id=authors_max_ids[:1])\n+ self.assertEqual(authors.get(), newest_author)\n", + "fail_to_pass": "[\"test_exact_query_rhs_with_selected_columns (lookup.tests.LookupTests)\"]", + "pass_to_pass": "[\"test_chain_date_time_lookups (lookup.tests.LookupTests)\", \"test_count (lookup.tests.LookupTests)\", \"test_custom_field_none_rhs (lookup.tests.LookupTests)\", \"Lookup.can_use_none_as_rhs=True allows None as a lookup value.\", \"test_error_messages (lookup.tests.LookupTests)\", \"test_escaping (lookup.tests.LookupTests)\", \"test_exact_exists (lookup.tests.LookupTests)\", \"Transforms are used for __exact=None.\", \"test_exact_sliced_queryset_limit_one (lookup.tests.LookupTests)\", \"test_exact_sliced_queryset_limit_one_offset (lookup.tests.LookupTests)\", \"test_exact_sliced_queryset_not_limited_to_one (lookup.tests.LookupTests)\", \"test_exclude (lookup.tests.LookupTests)\", \"test_exists (lookup.tests.LookupTests)\", \"test_get_next_previous_by (lookup.tests.LookupTests)\", \"test_in (lookup.tests.LookupTests)\", \"test_in_bulk (lookup.tests.LookupTests)\", \"test_in_bulk_lots_of_ids (lookup.tests.LookupTests)\", \"test_in_bulk_non_unique_field (lookup.tests.LookupTests)\", \"test_in_bulk_with_field (lookup.tests.LookupTests)\", \"test_in_different_database (lookup.tests.LookupTests)\", \"test_in_keeps_value_ordering (lookup.tests.LookupTests)\", \"test_iterator (lookup.tests.LookupTests)\", \"test_lookup_collision (lookup.tests.LookupTests)\", \"test_lookup_date_as_str (lookup.tests.LookupTests)\", \"test_lookup_int_as_str (lookup.tests.LookupTests)\", \"test_nested_outerref_lhs (lookup.tests.LookupTests)\", \"test_none (lookup.tests.LookupTests)\", \"test_nonfield_lookups (lookup.tests.LookupTests)\", \"test_pattern_lookups_with_substr (lookup.tests.LookupTests)\", \"test_regex (lookup.tests.LookupTests)\", \"test_regex_backreferencing (lookup.tests.LookupTests)\", 
\"test_regex_non_ascii (lookup.tests.LookupTests)\", \"test_regex_non_string (lookup.tests.LookupTests)\", \"test_regex_null (lookup.tests.LookupTests)\", \"test_relation_nested_lookup_error (lookup.tests.LookupTests)\", \"test_unsupported_lookups (lookup.tests.LookupTests)\", \"test_values (lookup.tests.LookupTests)\", \"test_values_list (lookup.tests.LookupTests)\"]", + "expected_spans": { + "django/db/models/lookups.py": [ + "Exact.process_rhs" + ] + }, + "test_file_spans": { + "tests/lookup/tests.py": [ + "imports" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11815", + "repo": "django/django", + "base_commit": "e02f67ef2d03d48128e7a118bf75f0418e24e8ac", + "problem_statement": "Migrations uses value of enum object instead of its name.\nDescription\n\t \n\t\t(last modified by oasl)\n\t \nWhen using Enum object as a default value for a CharField, the generated migration file uses the value of the Enum object instead of the its name. This causes a problem when using Django translation on the value of the Enum object. \nThe problem is that, when the Enum object value get translated to the users language, the old migration files raise an error stating that the Enum does not have the corresponding value. (because the Enum value is translated to another language)\nExample:\nLet say we have this code in models.py:\nfrom enum import Enum\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db import models\nclass Status(Enum):\n\tGOOD = _('Good') # 'Good' will be translated\n\tBAD = _('Bad') # 'Bad' will be translated\n\tdef __str__(self):\n\t\treturn self.name\nclass Item(models.Model):\n\tstatus = models.CharField(default=Status.GOOD, max_length=128)\nIn the generated migration file, the code will be:\n...\n('status', models.CharField(default=Status('Good'), max_length=128))\n...\nAfter the translation, 'Good' will be translated to another word and it will not be part of the Status Enum class any more, so the migration file will raise the error on the previous line:\nValueError: 'Good' is not a valid Status\nShouldn't the code generated by the migration uses the name of the Status Enum 'GOOD', not the value of it, since it is changeable?\nIt should be:\n('status', models.CharField(default=Status['GOOD'], max_length=128))\nThis will be correct regardless of the translated word\n", + "golden_patch": "diff --git a/django/db/migrations/serializer.py b/django/db/migrations/serializer.py\n--- a/django/db/migrations/serializer.py\n+++ b/django/db/migrations/serializer.py\n@@ -120,9 +120,10 @@ class EnumSerializer(BaseSerializer):\n def serialize(self):\n enum_class = self.value.__class__\n module = enum_class.__module__\n- v_string, v_imports = serializer_factory(self.value.value).serialize()\n- imports = {'import %s' % module, *v_imports}\n- return \"%s.%s(%s)\" % (module, enum_class.__name__, v_string), imports\n+ return (\n+ '%s.%s[%r]' % (module, enum_class.__name__, self.value.name),\n+ {'import %s' % module},\n+ )\n \n \n class FloatSerializer(BaseSimpleSerializer):\n", + "test_patch": "diff --git a/tests/migrations/test_writer.py b/tests/migrations/test_writer.py\n--- a/tests/migrations/test_writer.py\n+++ b/tests/migrations/test_writer.py\n@@ -257,6 +257,10 @@ class TextEnum(enum.Enum):\n A = 'a-value'\n B = 'value-b'\n \n+ class TextTranslatedEnum(enum.Enum):\n+ A = _('a-value')\n+ B = _('value-b')\n+\n class BinaryEnum(enum.Enum):\n A = b'a-value'\n B = b'value-b'\n@@ -267,15 +271,19 @@ class IntEnum(enum.IntEnum):\n \n 
self.assertSerializedResultEqual(\n TextEnum.A,\n- (\"migrations.test_writer.TextEnum('a-value')\", {'import migrations.test_writer'})\n+ (\"migrations.test_writer.TextEnum['A']\", {'import migrations.test_writer'})\n+ )\n+ self.assertSerializedResultEqual(\n+ TextTranslatedEnum.A,\n+ (\"migrations.test_writer.TextTranslatedEnum['A']\", {'import migrations.test_writer'})\n )\n self.assertSerializedResultEqual(\n BinaryEnum.A,\n- (\"migrations.test_writer.BinaryEnum(b'a-value')\", {'import migrations.test_writer'})\n+ (\"migrations.test_writer.BinaryEnum['A']\", {'import migrations.test_writer'})\n )\n self.assertSerializedResultEqual(\n IntEnum.B,\n- (\"migrations.test_writer.IntEnum(2)\", {'import migrations.test_writer'})\n+ (\"migrations.test_writer.IntEnum['B']\", {'import migrations.test_writer'})\n )\n \n field = models.CharField(default=TextEnum.B, choices=[(m.value, m) for m in TextEnum])\n@@ -283,27 +291,39 @@ class IntEnum(enum.IntEnum):\n self.assertEqual(\n string,\n \"models.CharField(choices=[\"\n- \"('a-value', migrations.test_writer.TextEnum('a-value')), \"\n- \"('value-b', migrations.test_writer.TextEnum('value-b'))], \"\n- \"default=migrations.test_writer.TextEnum('value-b'))\"\n+ \"('a-value', migrations.test_writer.TextEnum['A']), \"\n+ \"('value-b', migrations.test_writer.TextEnum['B'])], \"\n+ \"default=migrations.test_writer.TextEnum['B'])\"\n+ )\n+ field = models.CharField(\n+ default=TextTranslatedEnum.A,\n+ choices=[(m.value, m) for m in TextTranslatedEnum],\n+ )\n+ string = MigrationWriter.serialize(field)[0]\n+ self.assertEqual(\n+ string,\n+ \"models.CharField(choices=[\"\n+ \"('a-value', migrations.test_writer.TextTranslatedEnum['A']), \"\n+ \"('value-b', migrations.test_writer.TextTranslatedEnum['B'])], \"\n+ \"default=migrations.test_writer.TextTranslatedEnum['A'])\"\n )\n field = models.CharField(default=BinaryEnum.B, choices=[(m.value, m) for m in BinaryEnum])\n string = MigrationWriter.serialize(field)[0]\n self.assertEqual(\n string,\n \"models.CharField(choices=[\"\n- \"(b'a-value', migrations.test_writer.BinaryEnum(b'a-value')), \"\n- \"(b'value-b', migrations.test_writer.BinaryEnum(b'value-b'))], \"\n- \"default=migrations.test_writer.BinaryEnum(b'value-b'))\"\n+ \"(b'a-value', migrations.test_writer.BinaryEnum['A']), \"\n+ \"(b'value-b', migrations.test_writer.BinaryEnum['B'])], \"\n+ \"default=migrations.test_writer.BinaryEnum['B'])\"\n )\n field = models.IntegerField(default=IntEnum.A, choices=[(m.value, m) for m in IntEnum])\n string = MigrationWriter.serialize(field)[0]\n self.assertEqual(\n string,\n \"models.IntegerField(choices=[\"\n- \"(1, migrations.test_writer.IntEnum(1)), \"\n- \"(2, migrations.test_writer.IntEnum(2))], \"\n- \"default=migrations.test_writer.IntEnum(1))\"\n+ \"(1, migrations.test_writer.IntEnum['A']), \"\n+ \"(2, migrations.test_writer.IntEnum['B'])], \"\n+ \"default=migrations.test_writer.IntEnum['A'])\"\n )\n \n def test_serialize_choices(self):\n@@ -454,7 +474,7 @@ def test_serialize_class_based_validators(self):\n # Test a string regex with flag\n validator = RegexValidator(r'^[0-9]+$', flags=re.S)\n string = MigrationWriter.serialize(validator)[0]\n- self.assertEqual(string, \"django.core.validators.RegexValidator('^[0-9]+$', flags=re.RegexFlag(16))\")\n+ self.assertEqual(string, \"django.core.validators.RegexValidator('^[0-9]+$', flags=re.RegexFlag['DOTALL'])\")\n self.serialize_round_trip(validator)\n \n # Test message and code\n", + "fail_to_pass": "[\"test_serialize_class_based_validators 
(migrations.test_writer.WriterTests)\", \"test_serialize_enums (migrations.test_writer.WriterTests)\"]", + "pass_to_pass": "[\"test_args_kwargs_signature (migrations.test_writer.OperationWriterTests)\", \"test_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_empty_signature (migrations.test_writer.OperationWriterTests)\", \"test_expand_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_kwargs_signature (migrations.test_writer.OperationWriterTests)\", \"test_multiline_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_nested_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_nested_operation_expand_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_custom_operation (migrations.test_writer.WriterTests)\", \"test_deconstruct_class_arguments (migrations.test_writer.WriterTests)\", \"test_migration_file_header_comments (migrations.test_writer.WriterTests)\", \"test_migration_path (migrations.test_writer.WriterTests)\", \"test_models_import_omitted (migrations.test_writer.WriterTests)\", \"test_register_non_serializer (migrations.test_writer.WriterTests)\", \"test_register_serializer (migrations.test_writer.WriterTests)\", \"test_serialize_builtin_types (migrations.test_writer.WriterTests)\", \"test_serialize_builtins (migrations.test_writer.WriterTests)\", \"test_serialize_choices (migrations.test_writer.WriterTests)\", \"test_serialize_collections (migrations.test_writer.WriterTests)\", \"test_serialize_compiled_regex (migrations.test_writer.WriterTests)\", \"test_serialize_constants (migrations.test_writer.WriterTests)\", \"test_serialize_datetime (migrations.test_writer.WriterTests)\", \"test_serialize_empty_nonempty_tuple (migrations.test_writer.WriterTests)\", \"test_serialize_fields (migrations.test_writer.WriterTests)\", \"test_serialize_frozensets (migrations.test_writer.WriterTests)\", \"test_serialize_functions (migrations.test_writer.WriterTests)\", \"test_serialize_functools_partial (migrations.test_writer.WriterTests)\", \"test_serialize_functools_partialmethod (migrations.test_writer.WriterTests)\", \"test_serialize_iterators (migrations.test_writer.WriterTests)\", \"test_serialize_lazy_objects (migrations.test_writer.WriterTests)\", \"A reference in a local scope can't be serialized.\", \"test_serialize_managers (migrations.test_writer.WriterTests)\", \"test_serialize_multiline_strings (migrations.test_writer.WriterTests)\", \"test_serialize_numbers (migrations.test_writer.WriterTests)\", \"test_serialize_range (migrations.test_writer.WriterTests)\", \"test_serialize_set (migrations.test_writer.WriterTests)\", \"test_serialize_settings (migrations.test_writer.WriterTests)\", \"test_serialize_strings (migrations.test_writer.WriterTests)\", \"test_serialize_timedelta (migrations.test_writer.WriterTests)\", \"test_serialize_type_none (migrations.test_writer.WriterTests)\", \"An unbound method used within a class body can be serialized.\", \"test_serialize_uuid (migrations.test_writer.WriterTests)\", \"test_simple_migration (migrations.test_writer.WriterTests)\", \"test_sorted_imports (migrations.test_writer.WriterTests)\"]", + "expected_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "test_file_spans": { + "tests/migrations/test_writer.py": [ + "WriterTests.test_serialize_enums", + "WriterTests.test_serialize_class_based_validators" + ] + }, + "resolved_by": [ + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, 
+ "alternative_spans": {} + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "imports", + "BaseSerializer.__init__", + "BaseSerializer.serialize", + "BaseSequenceSerializer._format", + "BaseSequenceSerializer.serialize", + "BaseSimpleSerializer", + "BaseSimpleSerializer.serialize", + "ChoicesSerializer", + "ChoicesSerializer.serialize", + "DateTimeSerializer", + "DateTimeSerializer.serialize", + "DatetimeDatetimeSerializer", + "DatetimeDatetimeSerializer.serialize", + "DecimalSerializer", + "DecimalSerializer.serialize", + "DeconstructableSerializer", + "DeconstructableSerializer.serialize_deconstructed", + "DeconstructableSerializer._serialize_path", + "DictionarySerializer.serialize", + "EnumSerializer", + "EnumSerializer.serialize", + "FloatSerializer", + "FrozensetSerializer._format", + "FunctionTypeSerializer", + "FunctionTypeSerializer.serialize", + "FunctoolsPartialSerializer", + "FunctoolsPartialSerializer.serialize", + "IterableSerializer", + "IterableSerializer.serialize", + "ModelFieldSerializer", + "ModelFieldSerializer.serialize", + "ModelManagerSerializer", + "ModelManagerSerializer.serialize", + "OperationSerializer", + "OperationSerializer.serialize", + "RegexSerializer", + "RegexSerializer.serialize", + "SequenceSerializer", + "SequenceSerializer._format", + "SetSerializer", + "SetSerializer._format", + "SettingsReferenceSerializer", + "SettingsReferenceSerializer.serialize", + "TupleSerializer", + "TupleSerializer._format", + "TypeSerializer", + "TypeSerializer.serialize", + "UUIDSerializer", + "UUIDSerializer.serialize", + "Serializer", + "Serializer.register", + "serializer_factory" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "imports", + "BaseSerializer.__init__", + "BaseSerializer.serialize", + "BaseSequenceSerializer._format", + "BaseSequenceSerializer.serialize", + "BaseSimpleSerializer", + "BaseSimpleSerializer.serialize", + "ChoicesSerializer", + "ChoicesSerializer.serialize", + "DateTimeSerializer", + "DateTimeSerializer.serialize", + "DatetimeDatetimeSerializer", + "DatetimeDatetimeSerializer.serialize", + "DecimalSerializer", + "DecimalSerializer.serialize", + "DeconstructableSerializer", + "DeconstructableSerializer.serialize_deconstructed", + "DeconstructableSerializer._serialize_path", + "DictionarySerializer.serialize", + "EnumSerializer", + "EnumSerializer.serialize", + "FloatSerializer", + "FrozensetSerializer._format", + "FunctionTypeSerializer", + "FunctionTypeSerializer.serialize", + "FunctoolsPartialSerializer", + "FunctoolsPartialSerializer.serialize", + "IterableSerializer", + "IterableSerializer.serialize", + "ModelFieldSerializer", + "ModelFieldSerializer.serialize", + "ModelManagerSerializer", + "ModelManagerSerializer.serialize", + "OperationSerializer", + "OperationSerializer.serialize", + "RegexSerializer", + "RegexSerializer.serialize", + "SequenceSerializer", + 
"SequenceSerializer._format", + "SetSerializer", + "SetSerializer._format", + "SettingsReferenceSerializer", + "SettingsReferenceSerializer.serialize", + "TupleSerializer", + "TupleSerializer._format", + "TypeSerializer", + "TypeSerializer.serialize", + "UUIDSerializer", + "UUIDSerializer.serialize", + "Serializer", + "Serializer.register", + "serializer_factory" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer.serialize", + "FloatSerializer" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer.serialize", + "FloatSerializer" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer", + "EnumSerializer.serialize" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "django/db/migrations/serializer.py": [ + "EnumSerializer.serialize", + 
"FloatSerializer" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-17655", + "repo": "sympy/sympy", + "base_commit": "f5e965947af2410ded92cfad987aaf45262ea434", + "problem_statement": "Unexpected exception when multiplying geometry.Point and number\n```python\r\nfrom sympy import geometry as ge\r\nimport sympy\r\n\r\npoint1 = ge.Point(0,0)\r\npoint2 = ge.Point(1,1)\r\n```\r\n\r\nThis line works fine\r\n```python\r\npoint1 + point2 * sympy.sympify(2.0)\r\n```\r\n\r\nBut when I write the same this way it raises an exception\r\n```python\r\npoint1 + sympy.sympify(2.0) * point2\r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n~/.virtualenvs/test/lib/python3.6/site-packages/sympy/geometry/point.py in __add__(self, other)\r\n 219 try:\r\n--> 220 s, o = Point._normalize_dimension(self, Point(other, evaluate=False))\r\n 221 except TypeError:\r\n\r\n~/.virtualenvs/test/lib/python3.6/site-packages/sympy/geometry/point.py in __new__(cls, *args, **kwargs)\r\n 128 Expecting sequence of coordinates, not `{}`'''\r\n--> 129 .format(func_name(coords))))\r\n 130 # A point where only `dim` is specified is initialized\r\n\r\nTypeError: \r\nExpecting sequence of coordinates, not `Mul`\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nGeometryError Traceback (most recent call last)\r\n in \r\n----> 1 point1 + sympy.sympify(2.0)* point2\r\n\r\n~/.virtualenvs/test/lib/python3.6/site-packages/sympy/geometry/point.py in __add__(self, other)\r\n 220 s, o = Point._normalize_dimension(self, Point(other, evaluate=False))\r\n 221 except TypeError:\r\n--> 222 raise GeometryError(\"Don't know how to add {} and a Point object\".format(other))\r\n 223 \r\n 224 coords = [simplify(a + b) for a, b in zip(s, o)]\r\n\r\nGeometryError: Don't know how to add 2.0*Point2D(1, 1) and a Point object\r\n```\r\n\r\nThe expected behaviour is, that both lines give the same result\n", + "golden_patch": "diff --git a/sympy/geometry/point.py b/sympy/geometry/point.py\n--- a/sympy/geometry/point.py\n+++ b/sympy/geometry/point.py\n@@ -278,6 +278,10 @@ def __mul__(self, factor):\n coords = [simplify(x*factor) for x in self.args]\n return Point(coords, evaluate=False)\n \n+ def __rmul__(self, factor):\n+ \"\"\"Multiply a factor by point's coordinates.\"\"\"\n+ return self.__mul__(factor)\n+\n def __neg__(self):\n \"\"\"Negate the point.\"\"\"\n coords = [-x for x in self.args]\n", + "test_patch": "diff --git a/sympy/geometry/tests/test_point.py b/sympy/geometry/tests/test_point.py\n--- a/sympy/geometry/tests/test_point.py\n+++ b/sympy/geometry/tests/test_point.py\n@@ -26,7 +26,6 @@ def test_point():\n assert p2.y == y2\n assert (p3 + p4) == p4\n assert (p2 - p1) == Point(y1 - x1, y2 - x2)\n- assert p4*5 == Point(5, 5)\n assert -p2 == Point(-y1, -y2)\n raises(ValueError, lambda: Point(3, I))\n raises(ValueError, lambda: Point(2*I, I))\n@@ -92,6 +91,7 @@ def test_point():\n \n assert p4 * 5 == Point(5, 5)\n assert p4 / 5 == Point(0.2, 0.2)\n+ assert 5 * p4 == Point(5, 5)\n \n raises(ValueError, lambda: Point(0, 0) + 10)\n \n@@ -140,7 +140,6 @@ def test_point3D():\n assert p2.y == y2\n assert (p3 + p4) == p4\n assert (p2 - p1) == Point3D(y1 - x1, y2 - x2, y3 - x3)\n- assert p4*5 == Point3D(5, 5, 5)\n assert -p2 == Point3D(-y1, -y2, -y3)\n \n assert Point(34.05, sqrt(3)) == Point(Rational(681, 20), sqrt(3))\n@@ -169,6 +168,7 @@ def test_point3D():\n \n assert p4 * 5 == Point3D(5, 5, 5)\n assert p4 / 5 == 
Point3D(0.2, 0.2, 0.2)\n+ assert 5 * p4 == Point3D(5, 5, 5)\n \n raises(ValueError, lambda: Point3D(0, 0, 0) + 10)\n \n", + "fail_to_pass": "[\"test_point\", \"test_point3D\"]", + "pass_to_pass": "[\"test_Point2D\", \"test_issue_9214\", \"test_issue_11617\", \"test_transform\", \"test_concyclic_doctest_bug\", \"test_arguments\", \"test_unit\", \"test_dot\", \"test__normalize_dimension\"]", + "expected_spans": { + "sympy/geometry/point.py": [ + "Point.__neg__" + ] + }, + "test_file_spans": { + "sympy/geometry/tests/test_point.py": [ + "test_point", + "test_point3D" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__neg__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__neg__" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__sub__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__sub__" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__add__", + "Point" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__add__", + "Point" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__sub__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__sub__" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__mul__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__mul__" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__add__", + "Point.__neg__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__add__", + "Point.__neg__" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__add__", + "Point.__div__", + "Point.__mul__", + "Point.__sub__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__add__", + "Point.__div__", + "Point.__mul__", + "Point.__sub__" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__mul__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__mul__" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__neg__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__neg__" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__new__", + "Point.__neg__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__new__", + "Point.__neg__" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/geometry/point.py": [ + "Point.__mul__" + ] + }, + "alternative_spans": { + "sympy/geometry/point.py": [ + "Point.__mul__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/geometry/point.py": [ + "imports", + "Point.__add__", + "Point.__neg__" + ], + "sympy/geometry/tests/test_point.py": [ + "test_point" + ] + }, + "alternative_spans": { + 
"sympy/geometry/point.py": [ + "imports", + "Point.__add__", + "Point.__neg__" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240630_agentless_gpt4o", + "spans": { + "sympy/geometry/point.py": [ + "Point.__sub__" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sympy/geometry/point.py": [ + "Point.__add__", + "Point" + ] + } + }, + { + "run_name": "20240806_SuperCoder2.0", + "spans": { + "sympy/geometry/point.py": [ + "Point" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "sympy/geometry/point.py": [ + "Point.__sub__" + ] + } + }, + { + "run_name": "20240604_CodeR", + "spans": { + "sympy/geometry/point.py": [ + "Point.__mul__" + ] + } + }, + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "sympy/geometry/point.py": [ + "Point.__add__", + "Point.__div__", + "Point.__mul__", + "Point.__sub__" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "sympy/geometry/point.py": [ + "Point.__mul__" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "sympy/geometry/point.py": [ + "Point.__mul__" + ] + } + } + ] + }, + { + "instance_id": "django__django-11848", + "repo": "django/django", + "base_commit": "f0adf3b9b7a19cdee05368ff0c0c2d087f011180", + "problem_statement": "django.utils.http.parse_http_date two digit year check is incorrect\nDescription\n\t \n\t\t(last modified by Ad Timmering)\n\t \nRFC 850 does not mention this, but in RFC 7231 (and there's something similar in RFC 2822), there's the following quote:\nRecipients of a timestamp value in rfc850-date format, which uses a\ntwo-digit year, MUST interpret a timestamp that appears to be more\nthan 50 years in the future as representing the most recent year in\nthe past that had the same last two digits.\nCurrent logic is hard coded to consider 0-69 to be in 2000-2069, and 70-99 to be 1970-1999, instead of comparing versus the current year.\n", + "golden_patch": "diff --git a/django/utils/http.py b/django/utils/http.py\n--- a/django/utils/http.py\n+++ b/django/utils/http.py\n@@ -176,10 +176,14 @@ def parse_http_date(date):\n try:\n year = int(m.group('year'))\n if year < 100:\n- if year < 70:\n- year += 2000\n+ current_year = datetime.datetime.utcnow().year\n+ current_century = current_year - (current_year % 100)\n+ if year - (current_year % 100) > 50:\n+ # year that appears to be more than 50 years in the future are\n+ # interpreted as representing the past.\n+ year += current_century - 100\n else:\n- year += 1900\n+ year += current_century\n month = MONTHS.index(m.group('mon').lower()) + 1\n day = int(m.group('day'))\n hour = int(m.group('hour'))\n", + "test_patch": "diff --git a/tests/utils_tests/test_http.py b/tests/utils_tests/test_http.py\n--- a/tests/utils_tests/test_http.py\n+++ b/tests/utils_tests/test_http.py\n@@ -1,5 +1,6 @@\n import unittest\n from datetime import datetime\n+from unittest import mock\n \n from django.test import SimpleTestCase, ignore_warnings\n from django.utils.datastructures import MultiValueDict\n@@ -316,9 +317,27 @@ def test_parsing_rfc1123(self):\n parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')\n self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))\n \n- def test_parsing_rfc850(self):\n- parsed = parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')\n- self.assertEqual(datetime.utcfromtimestamp(parsed), datetime(1994, 11, 6, 8, 49, 37))\n+ @mock.patch('django.utils.http.datetime.datetime')\n+ def test_parsing_rfc850(self, 
mocked_datetime):\n+ mocked_datetime.side_effect = datetime\n+ mocked_datetime.utcnow = mock.Mock()\n+ utcnow_1 = datetime(2019, 11, 6, 8, 49, 37)\n+ utcnow_2 = datetime(2020, 11, 6, 8, 49, 37)\n+ utcnow_3 = datetime(2048, 11, 6, 8, 49, 37)\n+ tests = (\n+ (utcnow_1, 'Tuesday, 31-Dec-69 08:49:37 GMT', datetime(2069, 12, 31, 8, 49, 37)),\n+ (utcnow_1, 'Tuesday, 10-Nov-70 08:49:37 GMT', datetime(1970, 11, 10, 8, 49, 37)),\n+ (utcnow_1, 'Sunday, 06-Nov-94 08:49:37 GMT', datetime(1994, 11, 6, 8, 49, 37)),\n+ (utcnow_2, 'Wednesday, 31-Dec-70 08:49:37 GMT', datetime(2070, 12, 31, 8, 49, 37)),\n+ (utcnow_2, 'Friday, 31-Dec-71 08:49:37 GMT', datetime(1971, 12, 31, 8, 49, 37)),\n+ (utcnow_3, 'Sunday, 31-Dec-00 08:49:37 GMT', datetime(2000, 12, 31, 8, 49, 37)),\n+ (utcnow_3, 'Friday, 31-Dec-99 08:49:37 GMT', datetime(1999, 12, 31, 8, 49, 37)),\n+ )\n+ for utcnow, rfc850str, expected_date in tests:\n+ with self.subTest(rfc850str=rfc850str):\n+ mocked_datetime.utcnow.return_value = utcnow\n+ parsed = parse_http_date(rfc850str)\n+ self.assertEqual(datetime.utcfromtimestamp(parsed), expected_date)\n \n def test_parsing_asctime(self):\n parsed = parse_http_date('Sun Nov 6 08:49:37 1994')\n", + "fail_to_pass": "[\"test_parsing_rfc850 (utils_tests.test_http.HttpDateProcessingTests)\", \"test_parsing_year_less_than_70 (utils_tests.test_http.HttpDateProcessingTests)\"]", + "pass_to_pass": "[\"test_input_too_large (utils_tests.test_http.Base36IntTests)\", \"test_invalid_literal (utils_tests.test_http.Base36IntTests)\", \"test_negative_input (utils_tests.test_http.Base36IntTests)\", \"test_roundtrip (utils_tests.test_http.Base36IntTests)\", \"test_to_base36_errors (utils_tests.test_http.Base36IntTests)\", \"test_to_int_errors (utils_tests.test_http.Base36IntTests)\", \"test_values (utils_tests.test_http.Base36IntTests)\", \"test (utils_tests.test_http.EscapeLeadingSlashesTests)\", \"test_quote (utils_tests.test_http.URLQuoteTests)\", \"test_quote_plus (utils_tests.test_http.URLQuoteTests)\", \"test_unquote (utils_tests.test_http.URLQuoteTests)\", \"test_unquote_plus (utils_tests.test_http.URLQuoteTests)\", \"test_parsing (utils_tests.test_http.ETagProcessingTests)\", \"test_quoting (utils_tests.test_http.ETagProcessingTests)\", \"test_allowed_hosts_str (utils_tests.test_http.IsSafeURLTests)\", \"test_bad_urls (utils_tests.test_http.IsSafeURLTests)\", \"test_basic_auth (utils_tests.test_http.IsSafeURLTests)\", \"test_good_urls (utils_tests.test_http.IsSafeURLTests)\", \"test_is_safe_url_deprecated (utils_tests.test_http.IsSafeURLTests)\", \"test_no_allowed_hosts (utils_tests.test_http.IsSafeURLTests)\", \"test_secure_param_https_urls (utils_tests.test_http.IsSafeURLTests)\", \"test_secure_param_non_https_urls (utils_tests.test_http.IsSafeURLTests)\", \"test_bad (utils_tests.test_http.IsSameDomainTests)\", \"test_good (utils_tests.test_http.IsSameDomainTests)\", \"test_roundtrip (utils_tests.test_http.URLSafeBase64Tests)\", \"test_http_date (utils_tests.test_http.HttpDateProcessingTests)\", \"test_parsing_asctime (utils_tests.test_http.HttpDateProcessingTests)\", \"test_parsing_rfc1123 (utils_tests.test_http.HttpDateProcessingTests)\", \"test_custom_iterable_not_doseq (utils_tests.test_http.URLEncodeTests)\", \"test_dict (utils_tests.test_http.URLEncodeTests)\", \"test_dict_containing_empty_sequence_doseq (utils_tests.test_http.URLEncodeTests)\", \"test_dict_containing_sequence_doseq (utils_tests.test_http.URLEncodeTests)\", \"test_dict_containing_sequence_not_doseq (utils_tests.test_http.URLEncodeTests)\", 
\"test_dict_containing_tuple_not_doseq (utils_tests.test_http.URLEncodeTests)\", \"test_dict_with_bytearray (utils_tests.test_http.URLEncodeTests)\", \"test_dict_with_bytes_values (utils_tests.test_http.URLEncodeTests)\", \"test_dict_with_sequence_of_bytes (utils_tests.test_http.URLEncodeTests)\", \"test_generator (utils_tests.test_http.URLEncodeTests)\", \"test_multivaluedict (utils_tests.test_http.URLEncodeTests)\", \"test_none (utils_tests.test_http.URLEncodeTests)\", \"test_none_in_generator (utils_tests.test_http.URLEncodeTests)\", \"test_none_in_sequence (utils_tests.test_http.URLEncodeTests)\", \"test_tuples (utils_tests.test_http.URLEncodeTests)\"]", + "expected_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "test_file_spans": { + "tests/utils_tests/test_http.py": [ + "imports", + "HttpDateProcessingTests.test_parsing_rfc850" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/utils/http.py": [ + "imports", + "urlquote", + "urlquote_plus", + "urlunquote", + "urlunquote_plus", + "urlencode", + "parse_http_date", + "parse_http_date_safe", + "base36_to_int", + "int_to_base36", + "parse_etags", + "is_same_domain", + "url_has_allowed_host_and_scheme", + "is_safe_url", + "_urlparse", + "_urlsplit", + "_url_has_allowed_host_and_scheme", + "limited_parse_qsl" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "imports", + "urlquote", + "urlquote_plus", + "urlunquote", + "urlunquote_plus", + "urlencode", + "parse_http_date", + "parse_http_date_safe", + "base36_to_int", + "int_to_base36", + "parse_etags", + "is_same_domain", + "url_has_allowed_host_and_scheme", + "is_safe_url", + "_urlparse", + "_urlsplit", + "_url_has_allowed_host_and_scheme", + "limited_parse_qsl" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/utils/http.py": [ + "http_date", + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "http_date", + "parse_http_date" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + 
"name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + }, + "alternative_spans": { + "django/utils/http.py": [ + "parse_http_date" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pydata__xarray-3364", + "repo": "pydata/xarray", + "base_commit": "863e49066ca4d61c9adfe62aca3bf21b90e1af8c", + "problem_statement": "Ignore missing variables when concatenating datasets?\nSeveral users (@raj-kesavan, @richardotis, now myself) have wondered about how to concatenate xray Datasets with different variables.\n\nWith the current `xray.concat`, you need to awkwardly create dummy variables filled with `NaN` in datasets that don't have them (or drop mismatched variables entirely). Neither of these are great options -- `concat` should have an option (the default?) to take care of this for the user.\n\nThis would also be more consistent with `pd.concat`, which takes a more relaxed approach to matching dataframes with different variables (it does an outer join).\n\n", + "golden_patch": "diff --git a/xarray/core/concat.py b/xarray/core/concat.py\n--- a/xarray/core/concat.py\n+++ b/xarray/core/concat.py\n@@ -312,15 +312,9 @@ def _dataset_concat(\n to_merge = {var: [] for var in variables_to_merge}\n \n for ds in datasets:\n- absent_merge_vars = variables_to_merge - set(ds.variables)\n- if absent_merge_vars:\n- raise ValueError(\n- \"variables %r are present in some datasets but not others. 
\"\n- % absent_merge_vars\n- )\n-\n for var in variables_to_merge:\n- to_merge[var].append(ds.variables[var])\n+ if var in ds:\n+ to_merge[var].append(ds.variables[var])\n \n for var in variables_to_merge:\n result_vars[var] = unique_variable(\n", + "test_patch": "diff --git a/xarray/tests/test_combine.py b/xarray/tests/test_combine.py\n--- a/xarray/tests/test_combine.py\n+++ b/xarray/tests/test_combine.py\n@@ -782,12 +782,11 @@ def test_auto_combine_previously_failed(self):\n actual = auto_combine(datasets, concat_dim=\"t\")\n assert_identical(expected, actual)\n \n- def test_auto_combine_still_fails(self):\n- # concat can't handle new variables (yet):\n- # https://github.com/pydata/xarray/issues/508\n+ def test_auto_combine_with_new_variables(self):\n datasets = [Dataset({\"x\": 0}, {\"y\": 0}), Dataset({\"x\": 1}, {\"y\": 1, \"z\": 1})]\n- with pytest.raises(ValueError):\n- auto_combine(datasets, \"y\")\n+ actual = auto_combine(datasets, \"y\")\n+ expected = Dataset({\"x\": (\"y\", [0, 1])}, {\"y\": [0, 1], \"z\": 1})\n+ assert_identical(expected, actual)\n \n def test_auto_combine_no_concat(self):\n objs = [Dataset({\"x\": 0}), Dataset({\"y\": 1})]\ndiff --git a/xarray/tests/test_concat.py b/xarray/tests/test_concat.py\n--- a/xarray/tests/test_concat.py\n+++ b/xarray/tests/test_concat.py\n@@ -68,6 +68,22 @@ def test_concat_simple(self, data, dim, coords):\n datasets = [g for _, g in data.groupby(dim, squeeze=False)]\n assert_identical(data, concat(datasets, dim, coords=coords))\n \n+ def test_concat_merge_variables_present_in_some_datasets(self, data):\n+ # coordinates present in some datasets but not others\n+ ds1 = Dataset(data_vars={\"a\": (\"y\", [0.1])}, coords={\"x\": 0.1})\n+ ds2 = Dataset(data_vars={\"a\": (\"y\", [0.2])}, coords={\"z\": 0.2})\n+ actual = concat([ds1, ds2], dim=\"y\", coords=\"minimal\")\n+ expected = Dataset({\"a\": (\"y\", [0.1, 0.2])}, coords={\"x\": 0.1, \"z\": 0.2})\n+ assert_identical(expected, actual)\n+\n+ # data variables present in some datasets but not others\n+ split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]\n+ data0, data1 = deepcopy(split_data)\n+ data1[\"foo\"] = (\"bar\", np.random.randn(10))\n+ actual = concat([data0, data1], \"dim1\")\n+ expected = data.copy().assign(foo=data1.foo)\n+ assert_identical(expected, actual)\n+\n def test_concat_2(self, data):\n dim = \"dim2\"\n datasets = [g for _, g in data.groupby(dim, squeeze=True)]\n@@ -190,11 +206,6 @@ def test_concat_errors(self):\n concat([data0, data1], \"dim1\", compat=\"identical\")\n assert_identical(data, concat([data0, data1], \"dim1\", compat=\"equals\"))\n \n- with raises_regex(ValueError, \"present in some datasets\"):\n- data0, data1 = deepcopy(split_data)\n- data1[\"foo\"] = (\"bar\", np.random.randn(10))\n- concat([data0, data1], \"dim1\")\n-\n with raises_regex(ValueError, \"compat.* invalid\"):\n concat(split_data, \"dim1\", compat=\"foobar\")\n \n", + "fail_to_pass": "[\"xarray/tests/test_combine.py::TestAutoCombineOldAPI::test_auto_combine_with_new_variables\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_merge_variables_present_in_some_datasets\"]", + "pass_to_pass": "[\"xarray/tests/test_combine.py::TestTileIDsFromNestedList::test_1d\", \"xarray/tests/test_combine.py::TestTileIDsFromNestedList::test_2d\", \"xarray/tests/test_combine.py::TestTileIDsFromNestedList::test_3d\", \"xarray/tests/test_combine.py::TestTileIDsFromNestedList::test_single_dataset\", 
\"xarray/tests/test_combine.py::TestTileIDsFromNestedList::test_redundant_nesting\", \"xarray/tests/test_combine.py::TestTileIDsFromNestedList::test_ignore_empty_list\", \"xarray/tests/test_combine.py::TestTileIDsFromNestedList::test_uneven_depth_input\", \"xarray/tests/test_combine.py::TestTileIDsFromNestedList::test_uneven_length_input\", \"xarray/tests/test_combine.py::TestTileIDsFromNestedList::test_infer_from_datasets\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_1d\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_2d\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_no_dimension_coords\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_coord_not_monotonic\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_coord_monotonically_decreasing\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_no_concatenation_needed\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_2d_plus_bystander_dim\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_string_coords\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_lexicographic_sort_string_coords\", \"xarray/tests/test_combine.py::TestTileIDsFromCoords::test_datetime_coords\", \"xarray/tests/test_combine.py::TestNewTileIDs::test_new_tile_id[old_id0-new_id0]\", \"xarray/tests/test_combine.py::TestNewTileIDs::test_new_tile_id[old_id1-new_id1]\", \"xarray/tests/test_combine.py::TestNewTileIDs::test_new_tile_id[old_id2-new_id2]\", \"xarray/tests/test_combine.py::TestNewTileIDs::test_new_tile_id[old_id3-new_id3]\", \"xarray/tests/test_combine.py::TestNewTileIDs::test_new_tile_id[old_id4-new_id4]\", \"xarray/tests/test_combine.py::TestNewTileIDs::test_get_new_tile_ids\", \"xarray/tests/test_combine.py::TestCombineND::test_concat_once[dim1]\", \"xarray/tests/test_combine.py::TestCombineND::test_concat_once[new_dim]\", \"xarray/tests/test_combine.py::TestCombineND::test_concat_only_first_dim\", \"xarray/tests/test_combine.py::TestCombineND::test_concat_twice[dim1]\", \"xarray/tests/test_combine.py::TestCombineND::test_concat_twice[new_dim]\", \"xarray/tests/test_combine.py::TestCheckShapeTileIDs::test_check_depths\", \"xarray/tests/test_combine.py::TestCheckShapeTileIDs::test_check_lengths\", \"xarray/tests/test_combine.py::TestNestedCombine::test_nested_concat\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_nested_join[outer-expected0]\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_nested_join[inner-expected1]\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_nested_join[left-expected2]\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_nested_join[right-expected3]\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_nested_join_exact\", \"xarray/tests/test_combine.py::TestNestedCombine::test_empty_input\", \"xarray/tests/test_combine.py::TestNestedCombine::test_nested_concat_along_new_dim\", \"xarray/tests/test_combine.py::TestNestedCombine::test_nested_merge\", \"xarray/tests/test_combine.py::TestNestedCombine::test_concat_multiple_dims\", \"xarray/tests/test_combine.py::TestNestedCombine::test_concat_name_symmetry\", \"xarray/tests/test_combine.py::TestNestedCombine::test_concat_one_dim_merge_another\", \"xarray/tests/test_combine.py::TestNestedCombine::test_auto_combine_2d\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_nested_missing_data_new_dim\", \"xarray/tests/test_combine.py::TestNestedCombine::test_invalid_hypercube_input\", 
\"xarray/tests/test_combine.py::TestNestedCombine::test_merge_one_dim_concat_another\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_concat_over_redundant_nesting\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_nested_fill_value[fill_value0]\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_nested_fill_value[2]\", \"xarray/tests/test_combine.py::TestNestedCombine::test_combine_nested_fill_value[2.0]\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_by_coords\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_coords_join[outer-expected0]\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_coords_join[inner-expected1]\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_coords_join[left-expected2]\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_coords_join[right-expected3]\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_coords_join_exact\", \"xarray/tests/test_combine.py::TestCombineAuto::test_infer_order_from_coords\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_leaving_bystander_dimensions\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_by_coords_previously_failed\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_by_coords_still_fails\", \"xarray/tests/test_combine.py::TestCombineAuto::test_combine_by_coords_no_concat\", \"xarray/tests/test_combine.py::TestCombineAuto::test_check_for_impossible_ordering\", \"xarray/tests/test_combine.py::TestAutoCombineOldAPI::test_auto_combine\", \"xarray/tests/test_combine.py::TestAutoCombineOldAPI::test_auto_combine_previously_failed\", \"xarray/tests/test_combine.py::TestAutoCombineOldAPI::test_auto_combine_no_concat\", \"xarray/tests/test_combine.py::TestAutoCombineOldAPI::test_auto_combine_order_by_appearance_not_coords\", \"xarray/tests/test_combine.py::TestAutoCombineOldAPI::test_auto_combine_fill_value[fill_value0]\", \"xarray/tests/test_combine.py::TestAutoCombineOldAPI::test_auto_combine_fill_value[2]\", \"xarray/tests/test_combine.py::TestAutoCombineOldAPI::test_auto_combine_fill_value[2.0]\", \"xarray/tests/test_combine.py::TestAutoCombineDeprecation::test_auto_combine_with_concat_dim\", \"xarray/tests/test_combine.py::TestAutoCombineDeprecation::test_auto_combine_with_merge_and_concat\", \"xarray/tests/test_combine.py::TestAutoCombineDeprecation::test_auto_combine_with_coords\", \"xarray/tests/test_combine.py::TestAutoCombineDeprecation::test_auto_combine_without_coords\", \"xarray/tests/test_concat.py::test_concat_compat\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_simple[dim1-different]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_simple[dim1-minimal]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_simple[dim2-different]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_simple[dim2-minimal]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_2\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_coords_kwarg[dim1-different]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_coords_kwarg[dim1-minimal]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_coords_kwarg[dim1-all]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_coords_kwarg[dim2-different]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_coords_kwarg[dim2-minimal]\", 
\"xarray/tests/test_concat.py::TestConcatDataset::test_concat_coords_kwarg[dim2-all]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_dim_precedence\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_data_vars\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_coords\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_constant_index\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_size0\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_autoalign\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_errors\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_join_kwarg\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_promote_shape\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_do_not_promote\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_dim_is_variable\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_multiindex\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_fill_value[fill_value0]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_fill_value[2]\", \"xarray/tests/test_concat.py::TestConcatDataset::test_concat_fill_value[2.0]\", \"xarray/tests/test_concat.py::TestConcatDataArray::test_concat\", \"xarray/tests/test_concat.py::TestConcatDataArray::test_concat_encoding\", \"xarray/tests/test_concat.py::TestConcatDataArray::test_concat_lazy\", \"xarray/tests/test_concat.py::TestConcatDataArray::test_concat_fill_value[fill_value0]\", \"xarray/tests/test_concat.py::TestConcatDataArray::test_concat_fill_value[2]\", \"xarray/tests/test_concat.py::TestConcatDataArray::test_concat_fill_value[2.0]\", \"xarray/tests/test_concat.py::TestConcatDataArray::test_concat_join_kwarg\"]", + "expected_spans": { + "xarray/core/concat.py": [ + "_dataset_concat" + ] + }, + "test_file_spans": { + "xarray/tests/test_combine.py": [ + "TestAutoCombineOldAPI.test_auto_combine_still_fails" + ], + "xarray/tests/test_concat.py": [ + "TestConcatDataset.test_concat_2", + "TestConcatDataset.test_concat_errors" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11905", + "repo": "django/django", + "base_commit": "2f72480fbd27896c986c45193e1603e35c0b19a7", + "problem_statement": "Prevent using __isnull lookup with non-boolean value.\nDescription\n\t \n\t\t(last modified by Mariusz Felisiak)\n\t \n__isnull should not allow for non-boolean values. Using truthy/falsey doesn't promote INNER JOIN to an OUTER JOIN but works fine for a simple queries. Using non-boolean values is \u200bundocumented and untested. 
IMO we should raise an error for non-boolean values to avoid confusion and for consistency.\n", + "golden_patch": "diff --git a/django/db/models/lookups.py b/django/db/models/lookups.py\n--- a/django/db/models/lookups.py\n+++ b/django/db/models/lookups.py\n@@ -1,5 +1,6 @@\n import itertools\n import math\n+import warnings\n from copy import copy\n \n from django.core.exceptions import EmptyResultSet\n@@ -9,6 +10,7 @@\n )\n from django.db.models.query_utils import RegisterLookupMixin\n from django.utils.datastructures import OrderedSet\n+from django.utils.deprecation import RemovedInDjango40Warning\n from django.utils.functional import cached_property\n \n \n@@ -463,6 +465,17 @@ class IsNull(BuiltinLookup):\n prepare_rhs = False\n \n def as_sql(self, compiler, connection):\n+ if not isinstance(self.rhs, bool):\n+ # When the deprecation ends, replace with:\n+ # raise ValueError(\n+ # 'The QuerySet value for an isnull lookup must be True or '\n+ # 'False.'\n+ # )\n+ warnings.warn(\n+ 'Using a non-boolean value for an isnull lookup is '\n+ 'deprecated, use True or False instead.',\n+ RemovedInDjango40Warning,\n+ )\n sql, params = compiler.compile(self.lhs)\n if self.rhs:\n return \"%s IS NULL\" % sql, params\n", + "test_patch": "diff --git a/tests/lookup/models.py b/tests/lookup/models.py\n--- a/tests/lookup/models.py\n+++ b/tests/lookup/models.py\n@@ -96,3 +96,15 @@ class Product(models.Model):\n class Stock(models.Model):\n product = models.ForeignKey(Product, models.CASCADE)\n qty_available = models.DecimalField(max_digits=6, decimal_places=2)\n+\n+\n+class Freebie(models.Model):\n+ gift_product = models.ForeignKey(Product, models.CASCADE)\n+ stock_id = models.IntegerField(blank=True, null=True)\n+\n+ stock = models.ForeignObject(\n+ Stock,\n+ from_fields=['stock_id', 'gift_product'],\n+ to_fields=['id', 'product'],\n+ on_delete=models.CASCADE,\n+ )\ndiff --git a/tests/lookup/tests.py b/tests/lookup/tests.py\n--- a/tests/lookup/tests.py\n+++ b/tests/lookup/tests.py\n@@ -9,9 +9,10 @@\n from django.db.models.expressions import Exists, OuterRef\n from django.db.models.functions import Substr\n from django.test import TestCase, skipUnlessDBFeature\n+from django.utils.deprecation import RemovedInDjango40Warning\n \n from .models import (\n- Article, Author, Game, IsNullWithNoneAsRHS, Player, Season, Tag,\n+ Article, Author, Freebie, Game, IsNullWithNoneAsRHS, Player, Season, Tag,\n )\n \n \n@@ -969,3 +970,24 @@ def test_exact_query_rhs_with_selected_columns(self):\n ).values('max_id')\n authors = Author.objects.filter(id=authors_max_ids[:1])\n self.assertEqual(authors.get(), newest_author)\n+\n+ def test_isnull_non_boolean_value(self):\n+ # These tests will catch ValueError in Django 4.0 when using\n+ # non-boolean values for an isnull lookup becomes forbidden.\n+ # msg = (\n+ # 'The QuerySet value for an isnull lookup must be True or False.'\n+ # )\n+ msg = (\n+ 'Using a non-boolean value for an isnull lookup is deprecated, '\n+ 'use True or False instead.'\n+ )\n+ tests = [\n+ Author.objects.filter(alias__isnull=1),\n+ Article.objects.filter(author__isnull=1),\n+ Season.objects.filter(games__isnull=1),\n+ Freebie.objects.filter(stock__isnull=1),\n+ ]\n+ for qs in tests:\n+ with self.subTest(qs=qs):\n+ with self.assertWarnsMessage(RemovedInDjango40Warning, msg):\n+ qs.exists()\n", + "fail_to_pass": "[\"test_isnull_non_boolean_value (lookup.tests.LookupTests)\", \"test_iterator (lookup.tests.LookupTests)\"]", + "pass_to_pass": "[\"test_chain_date_time_lookups (lookup.tests.LookupTests)\", 
\"test_count (lookup.tests.LookupTests)\", \"test_custom_field_none_rhs (lookup.tests.LookupTests)\", \"Lookup.can_use_none_as_rhs=True allows None as a lookup value.\", \"test_error_messages (lookup.tests.LookupTests)\", \"test_escaping (lookup.tests.LookupTests)\", \"test_exact_exists (lookup.tests.LookupTests)\", \"Transforms are used for __exact=None.\", \"test_exact_query_rhs_with_selected_columns (lookup.tests.LookupTests)\", \"test_exact_sliced_queryset_limit_one (lookup.tests.LookupTests)\", \"test_exact_sliced_queryset_limit_one_offset (lookup.tests.LookupTests)\", \"test_exact_sliced_queryset_not_limited_to_one (lookup.tests.LookupTests)\", \"test_exclude (lookup.tests.LookupTests)\", \"test_exists (lookup.tests.LookupTests)\", \"test_get_next_previous_by (lookup.tests.LookupTests)\", \"test_in (lookup.tests.LookupTests)\", \"test_in_bulk (lookup.tests.LookupTests)\", \"test_in_bulk_lots_of_ids (lookup.tests.LookupTests)\", \"test_in_bulk_non_unique_field (lookup.tests.LookupTests)\", \"test_in_bulk_with_field (lookup.tests.LookupTests)\", \"test_in_different_database (lookup.tests.LookupTests)\", \"test_in_keeps_value_ordering (lookup.tests.LookupTests)\", \"test_lookup_collision (lookup.tests.LookupTests)\", \"test_lookup_date_as_str (lookup.tests.LookupTests)\", \"test_lookup_int_as_str (lookup.tests.LookupTests)\", \"test_nested_outerref_lhs (lookup.tests.LookupTests)\", \"test_none (lookup.tests.LookupTests)\", \"test_nonfield_lookups (lookup.tests.LookupTests)\", \"test_pattern_lookups_with_substr (lookup.tests.LookupTests)\", \"test_regex (lookup.tests.LookupTests)\", \"test_regex_backreferencing (lookup.tests.LookupTests)\", \"test_regex_non_ascii (lookup.tests.LookupTests)\", \"test_regex_non_string (lookup.tests.LookupTests)\", \"test_regex_null (lookup.tests.LookupTests)\", \"test_relation_nested_lookup_error (lookup.tests.LookupTests)\", \"test_unsupported_lookups (lookup.tests.LookupTests)\", \"test_values (lookup.tests.LookupTests)\", \"test_values_list (lookup.tests.LookupTests)\"]", + "expected_spans": { + "django/db/models/lookups.py": [ + "imports", + "IsNull.as_sql" + ] + }, + "test_file_spans": { + "tests/lookup/models.py": [], + "tests/lookup/tests.py": [ + "imports" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11910", + "repo": "django/django", + "base_commit": "d232fd76a85870daf345fd8f8d617fe7802ae194", + "problem_statement": "ForeignKey's to_field parameter gets the old field's name when renaming a PrimaryKey.\nDescription\n\t\nHaving these two models \nclass ModelA(models.Model):\n\tfield_wrong = models.CharField('field1', max_length=50, primary_key=True) # I'm a Primary key.\nclass ModelB(models.Model):\n\tfield_fk = models.ForeignKey(ModelA, blank=True, null=True, on_delete=models.CASCADE) \n... migrations applyed ...\nthe ModelA.field_wrong field has been renamed ... and Django recognizes the \"renaming\"\n# Primary key renamed\nclass ModelA(models.Model):\n\tfield_fixed = models.CharField('field1', max_length=50, primary_key=True) # I'm a Primary key.\nAttempts to to_field parameter. 
\nThe to_field points to the old_name (field_typo) and not to the new one (\"field_fixed\")\nclass Migration(migrations.Migration):\n\tdependencies = [\n\t\t('app1', '0001_initial'),\n\t]\n\toperations = [\n\t\tmigrations.RenameField(\n\t\t\tmodel_name='modela',\n\t\t\told_name='field_wrong',\n\t\t\tnew_name='field_fixed',\n\t\t),\n\t\tmigrations.AlterField(\n\t\t\tmodel_name='modelb',\n\t\t\tname='modela',\n\t\t\tfield=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app1.ModelB', to_field='field_wrong'),\n\t\t),\n\t]\n", + "golden_patch": "diff --git a/django/db/migrations/autodetector.py b/django/db/migrations/autodetector.py\n--- a/django/db/migrations/autodetector.py\n+++ b/django/db/migrations/autodetector.py\n@@ -927,6 +927,10 @@ def generate_altered_fields(self):\n if remote_field_name:\n to_field_rename_key = rename_key + (remote_field_name,)\n if to_field_rename_key in self.renamed_fields:\n+ # Repoint both model and field name because to_field\n+ # inclusion in ForeignKey.deconstruct() is based on\n+ # both.\n+ new_field.remote_field.model = old_field.remote_field.model\n new_field.remote_field.field_name = old_field.remote_field.field_name\n # Handle ForeignObjects which can have multiple from_fields/to_fields.\n from_fields = getattr(new_field, 'from_fields', None)\n", + "test_patch": "diff --git a/tests/migrations/test_autodetector.py b/tests/migrations/test_autodetector.py\n--- a/tests/migrations/test_autodetector.py\n+++ b/tests/migrations/test_autodetector.py\n@@ -932,6 +932,30 @@ def test_rename_foreign_object_fields(self):\n changes, 'app', 0, 1, model_name='bar', old_name='second', new_name='second_renamed',\n )\n \n+ def test_rename_referenced_primary_key(self):\n+ before = [\n+ ModelState('app', 'Foo', [\n+ ('id', models.CharField(primary_key=True, serialize=False)),\n+ ]),\n+ ModelState('app', 'Bar', [\n+ ('id', models.AutoField(primary_key=True)),\n+ ('foo', models.ForeignKey('app.Foo', models.CASCADE)),\n+ ]),\n+ ]\n+ after = [\n+ ModelState('app', 'Foo', [\n+ ('renamed_id', models.CharField(primary_key=True, serialize=False))\n+ ]),\n+ ModelState('app', 'Bar', [\n+ ('id', models.AutoField(primary_key=True)),\n+ ('foo', models.ForeignKey('app.Foo', models.CASCADE)),\n+ ]),\n+ ]\n+ changes = self.get_changes(before, after, MigrationQuestioner({'ask_rename': True}))\n+ self.assertNumberMigrations(changes, 'app', 1)\n+ self.assertOperationTypes(changes, 'app', 0, ['RenameField'])\n+ self.assertOperationAttributes(changes, 'app', 0, 0, old_name='id', new_name='renamed_id')\n+\n def test_rename_field_preserved_db_column(self):\n \"\"\"\n RenameField is used if a field is renamed and db_column equal to the\n", + "fail_to_pass": "[\"test_rename_referenced_primary_key (migrations.test_autodetector.AutodetectorTests)\"]", + "pass_to_pass": "[\"test_add_alter_order_with_respect_to (migrations.test_autodetector.AutodetectorTests)\", \"test_add_blank_textfield_and_charfield (migrations.test_autodetector.AutodetectorTests)\", \"Test change detection of new constraints.\", \"test_add_date_fields_with_auto_now_add_asking_for_default (migrations.test_autodetector.AutodetectorTests)\", \"test_add_date_fields_with_auto_now_add_not_asking_for_null_addition (migrations.test_autodetector.AutodetectorTests)\", \"test_add_date_fields_with_auto_now_not_asking_for_default (migrations.test_autodetector.AutodetectorTests)\", \"Tests autodetection of new fields.\", \"test_add_field_and_foo_together (migrations.test_autodetector.AutodetectorTests)\", 
\"#22030 - Adding a field with a default should work.\", \"Tests index/unique_together detection.\", \"Test change detection of new indexes.\", \"#22435 - Adding a ManyToManyField should not prompt for a default.\", \"test_add_model_order_with_respect_to (migrations.test_autodetector.AutodetectorTests)\", \"test_add_non_blank_textfield_and_charfield (migrations.test_autodetector.AutodetectorTests)\", \"Tests detection for adding db_table in model's options.\", \"Tests detection for changing db_table in model's options'.\", \"test_alter_db_table_no_changes (migrations.test_autodetector.AutodetectorTests)\", \"Tests detection for removing db_table in model's options.\", \"test_alter_db_table_with_model_change (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_field_to_fk_dependency_other_app (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_field_to_not_null_oneoff_default (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_field_to_not_null_with_default (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_field_to_not_null_without_default (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_fk_before_model_deletion (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_many_to_many (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_model_managers (migrations.test_autodetector.AutodetectorTests)\", \"Changing a model's options should make a change.\", \"Changing a proxy model's options should also make a change.\", \"Tests auto-naming of migrations for graph matching.\", \"Bases of other models come first.\", \"test_circular_dependency_mixed_addcreate (migrations.test_autodetector.AutodetectorTests)\", \"test_circular_dependency_swappable (migrations.test_autodetector.AutodetectorTests)\", \"test_circular_dependency_swappable2 (migrations.test_autodetector.AutodetectorTests)\", \"test_circular_dependency_swappable_self (migrations.test_autodetector.AutodetectorTests)\", \"test_circular_fk_dependency (migrations.test_autodetector.AutodetectorTests)\", \"test_concrete_field_changed_to_many_to_many (migrations.test_autodetector.AutodetectorTests)\", \"test_create_model_and_unique_together (migrations.test_autodetector.AutodetectorTests)\", \"Test creation of new model with constraints already defined.\", \"Test creation of new model with indexes already defined.\", \"test_create_with_through_model (migrations.test_autodetector.AutodetectorTests)\", \"test_custom_deconstructible (migrations.test_autodetector.AutodetectorTests)\", \"Tests custom naming of migrations for graph matching.\", \"Field instances are handled correctly by nested deconstruction.\", \"test_deconstruct_type (migrations.test_autodetector.AutodetectorTests)\", \"Nested deconstruction descends into dict values.\", \"Nested deconstruction descends into lists.\", \"Nested deconstruction descends into tuples.\", \"test_default_related_name_option (migrations.test_autodetector.AutodetectorTests)\", \"test_different_regex_does_alter (migrations.test_autodetector.AutodetectorTests)\", \"test_empty_foo_together (migrations.test_autodetector.AutodetectorTests)\", \"test_first_dependency (migrations.test_autodetector.AutodetectorTests)\", \"Having a ForeignKey automatically adds a dependency.\", \"test_fk_dependency_other_app (migrations.test_autodetector.AutodetectorTests)\", \"test_foo_together_no_changes (migrations.test_autodetector.AutodetectorTests)\", \"test_foo_together_ordering (migrations.test_autodetector.AutodetectorTests)\", \"Tests 
unique_together and field removal detection & ordering\", \"test_foreign_key_removed_before_target_model (migrations.test_autodetector.AutodetectorTests)\", \"test_identical_regex_doesnt_alter (migrations.test_autodetector.AutodetectorTests)\", \"test_keep_db_table_with_model_change (migrations.test_autodetector.AutodetectorTests)\", \"test_last_dependency (migrations.test_autodetector.AutodetectorTests)\", \"test_m2m_w_through_multistep_remove (migrations.test_autodetector.AutodetectorTests)\", \"test_managed_to_unmanaged (migrations.test_autodetector.AutodetectorTests)\", \"test_many_to_many_changed_to_concrete_field (migrations.test_autodetector.AutodetectorTests)\", \"test_many_to_many_removed_before_through_model (migrations.test_autodetector.AutodetectorTests)\", \"test_many_to_many_removed_before_through_model_2 (migrations.test_autodetector.AutodetectorTests)\", \"test_mti_inheritance_model_removal (migrations.test_autodetector.AutodetectorTests)\", \"#23956 - Inheriting models doesn't move *_ptr fields into AddField operations.\", \"test_nested_deconstructible_objects (migrations.test_autodetector.AutodetectorTests)\", \"Tests autodetection of new models.\", \"test_non_circular_foreignkey_dependency_removal (migrations.test_autodetector.AutodetectorTests)\", \"Tests deletion of old models.\", \"Test change detection of reordering of fields in indexes.\", \"test_pk_fk_included (migrations.test_autodetector.AutodetectorTests)\", \"The autodetector correctly deals with proxy models.\", \"Bases of proxies come first.\", \"test_proxy_custom_pk (migrations.test_autodetector.AutodetectorTests)\", \"FK dependencies still work on proxy models.\", \"test_proxy_to_mti_with_fk_to_proxy (migrations.test_autodetector.AutodetectorTests)\", \"test_proxy_to_mti_with_fk_to_proxy_proxy (migrations.test_autodetector.AutodetectorTests)\", \"test_remove_alter_order_with_respect_to (migrations.test_autodetector.AutodetectorTests)\", \"Test change detection of removed constraints.\", \"Tests autodetection of removed fields.\", \"test_remove_field_and_foo_together (migrations.test_autodetector.AutodetectorTests)\", \"Test change detection of removed indexes.\", \"Tests autodetection of renamed fields.\", \"test_rename_field_and_foo_together (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_field_foreign_key_to_field (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_field_preserved_db_column (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_foreign_object_fields (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_m2m_through_model (migrations.test_autodetector.AutodetectorTests)\", \"Tests autodetection of renamed models.\", \"test_rename_model_reverse_relation_dependencies (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_model_with_fks_in_different_position (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_model_with_renamed_rel_field (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_related_field_preserved_db_column (migrations.test_autodetector.AutodetectorTests)\", \"test_replace_string_with_foreignkey (migrations.test_autodetector.AutodetectorTests)\", \"test_same_app_circular_fk_dependency (migrations.test_autodetector.AutodetectorTests)\", \"test_same_app_circular_fk_dependency_with_unique_together_and_indexes (migrations.test_autodetector.AutodetectorTests)\", \"test_same_app_no_fk_dependency (migrations.test_autodetector.AutodetectorTests)\", \"Setting order_with_respect_to adds a 
field.\", \"test_supports_functools_partial (migrations.test_autodetector.AutodetectorTests)\", \"test_swappable (migrations.test_autodetector.AutodetectorTests)\", \"test_swappable_changed (migrations.test_autodetector.AutodetectorTests)\", \"test_swappable_circular_multi_mti (migrations.test_autodetector.AutodetectorTests)\", \"Swappable models get their CreateModel first.\", \"test_trim_apps (migrations.test_autodetector.AutodetectorTests)\", \"The autodetector correctly deals with managed models.\", \"test_unmanaged_custom_pk (migrations.test_autodetector.AutodetectorTests)\", \"test_unmanaged_delete (migrations.test_autodetector.AutodetectorTests)\", \"test_unmanaged_to_managed (migrations.test_autodetector.AutodetectorTests)\"]", + "expected_spans": { + "django/db/migrations/autodetector.py": [ + "MigrationAutodetector.generate_altered_fields" + ] + }, + "test_file_spans": { + "tests/migrations/test_autodetector.py": [ + "AutodetectorTests.test_rename_field_preserved_db_column" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-11964", + "repo": "django/django", + "base_commit": "fc2b1cc926e34041953738e58fa6ad3053059b22", + "problem_statement": "The value of a TextChoices/IntegerChoices field has a differing type\nDescription\n\t\nIf we create an instance of a model having a CharField or IntegerField with the keyword choices pointing to IntegerChoices or TextChoices, the value returned by the getter of the field will be of the same type as the one created by enum.Enum (enum value).\nFor example, this model:\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nclass MyChoice(models.TextChoices):\n\tFIRST_CHOICE = \"first\", _(\"The first choice, it is\")\n\tSECOND_CHOICE = \"second\", _(\"The second choice, it is\")\nclass MyObject(models.Model):\n\tmy_str_value = models.CharField(max_length=10, choices=MyChoice.choices)\nThen this test:\nfrom django.test import TestCase\nfrom testing.pkg.models import MyObject, MyChoice\nclass EnumTest(TestCase):\n\tdef setUp(self) -> None:\n\t\tself.my_object = MyObject.objects.create(my_str_value=MyChoice.FIRST_CHOICE)\n\tdef test_created_object_is_str(self):\n\t\tmy_object = self.my_object\n\t\tself.assertIsInstance(my_object.my_str_value, str)\n\t\tself.assertEqual(str(my_object.my_str_value), \"first\")\n\tdef test_retrieved_object_is_str(self):\n\t\tmy_object = MyObject.objects.last()\n\t\tself.assertIsInstance(my_object.my_str_value, str)\n\t\tself.assertEqual(str(my_object.my_str_value), \"first\")\nAnd then the results:\n(django30-venv) \u279c django30 ./manage.py test\nCreating test database for alias 'default'...\nSystem check identified no issues (0 silenced).\nF.\n======================================================================\nFAIL: test_created_object_is_str (testing.tests.EnumTest)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/Users/mikailkocak/Development/django30/testing/tests.py\", line 14, in test_created_object_is_str\n\tself.assertEqual(str(my_object.my_str_value), \"first\")\nAssertionError: 'MyChoice.FIRST_CHOICE' != 'first'\n- MyChoice.FIRST_CHOICE\n+ first\n----------------------------------------------------------------------\nRan 2 tests in 0.002s\nFAILED (failures=1)\nWe notice when invoking __str__(...) 
we don't actually get the value property of the enum value which can lead to some unexpected issues, especially when communicating to an external API with a freshly created instance that will send MyEnum.MyValue, and the one that was retrieved would send my_value.\n", + "golden_patch": "diff --git a/django/db/models/enums.py b/django/db/models/enums.py\n--- a/django/db/models/enums.py\n+++ b/django/db/models/enums.py\n@@ -60,7 +60,13 @@ def values(cls):\n \n class Choices(enum.Enum, metaclass=ChoicesMeta):\n \"\"\"Class for creating enumerated choices.\"\"\"\n- pass\n+\n+ def __str__(self):\n+ \"\"\"\n+ Use value when cast to str, so that Choices set as model instance\n+ attributes are rendered as expected in templates and similar contexts.\n+ \"\"\"\n+ return str(self.value)\n \n \n class IntegerChoices(int, Choices):\n", + "test_patch": "diff --git a/tests/model_enums/tests.py b/tests/model_enums/tests.py\n--- a/tests/model_enums/tests.py\n+++ b/tests/model_enums/tests.py\n@@ -143,6 +143,12 @@ class Fruit(models.IntegerChoices):\n APPLE = 1, 'Apple'\n PINEAPPLE = 1, 'Pineapple'\n \n+ def test_str(self):\n+ for test in [Gender, Suit, YearInSchool, Vehicle]:\n+ for member in test:\n+ with self.subTest(member=member):\n+ self.assertEqual(str(test[member.name]), str(member.value))\n+\n \n class Separator(bytes, models.Choices):\n FS = b'\\x1c', 'File Separator'\n", + "fail_to_pass": "[\"test_str (model_enums.tests.ChoicesTests)\", \"test_textchoices (model_enums.tests.ChoicesTests)\"]", + "pass_to_pass": "[\"test_integerchoices (model_enums.tests.ChoicesTests)\", \"test_integerchoices_auto_label (model_enums.tests.ChoicesTests)\", \"test_integerchoices_containment (model_enums.tests.ChoicesTests)\", \"test_integerchoices_empty_label (model_enums.tests.ChoicesTests)\", \"test_integerchoices_functional_api (model_enums.tests.ChoicesTests)\", \"test_invalid_definition (model_enums.tests.ChoicesTests)\", \"test_textchoices_auto_label (model_enums.tests.ChoicesTests)\", \"test_textchoices_blank_value (model_enums.tests.ChoicesTests)\", \"test_textchoices_containment (model_enums.tests.ChoicesTests)\", \"test_textchoices_empty_label (model_enums.tests.ChoicesTests)\", \"test_textchoices_functional_api (model_enums.tests.ChoicesTests)\", \"test_bool_unsupported (model_enums.tests.CustomChoicesTests)\", \"test_labels_valid (model_enums.tests.CustomChoicesTests)\", \"test_timezone_unsupported (model_enums.tests.CustomChoicesTests)\", \"test_uuid_unsupported (model_enums.tests.CustomChoicesTests)\"]", + "expected_spans": { + "django/db/models/enums.py": [ + "Choices" + ] + }, + "test_file_spans": { + "tests/model_enums/tests.py": [ + "ChoicesTests" + ] + }, + "resolved_by": [ + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/db/models/enums.py": [ + "Choices" + ] + }, + "alternative_spans": { + "django/db/models/enums.py": [ + "Choices" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/enums.py": [ + "Choices" + ] + }, + "alternative_spans": { + "django/db/models/enums.py": [ + "Choices" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/db/models/enums.py": [ + "IntegerChoices" + ] + }, + "alternative_spans": { + "django/db/models/enums.py": [ + "IntegerChoices" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/models/enums.py": [ + "imports", + "ChoicesMeta.__new__", + "ChoicesMeta.__contains__" + ] + }, + "alternative_spans": { + 
"django/db/models/enums.py": [ + "imports", + "ChoicesMeta.__new__", + "ChoicesMeta.__contains__" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/models/enums.py": [ + "Choices" + ] + }, + "alternative_spans": { + "django/db/models/enums.py": [ + "Choices" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/db/models/enums.py": [ + "IntegerChoices" + ] + }, + "alternative_spans": { + "django/db/models/enums.py": [ + "IntegerChoices" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/models/enums.py": [ + "IntegerChoices" + ] + }, + "alternative_spans": { + "django/db/models/enums.py": [ + "IntegerChoices" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/models/enums.py": [ + "Choices" + ] + }, + "alternative_spans": { + "django/db/models/enums.py": [ + "Choices" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240706_sima_gpt4o", + "spans": { + "django/db/models/enums.py": [ + "IntegerChoices" + ] + } + }, + { + "run_name": "20240806_SuperCoder2.0", + "spans": { + "django/db/models/enums.py": [ + "imports", + "ChoicesMeta.__new__", + "ChoicesMeta.__contains__" + ] + } + }, + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "django/db/models/enums.py": [ + "IntegerChoices" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "django/db/models/enums.py": [ + "IntegerChoices" + ] + } + } + ] + }, + { + "instance_id": "django__django-11999", + "repo": "django/django", + "base_commit": "84633905273fc916e3d17883810d9969c03f73c2", + "problem_statement": "Cannot override get_FOO_display() in Django 2.2+.\nDescription\n\t\nI cannot override the get_FIELD_display function on models since version 2.2. 
It works in version 2.1.\nExample:\nclass FooBar(models.Model):\n\tfoo_bar = models.CharField(_(\"foo\"), choices=[(1, 'foo'), (2, 'bar')])\n\tdef __str__(self):\n\t\treturn self.get_foo_bar_display() # This returns 'foo' or 'bar' in 2.2, but 'something' in 2.1\n\tdef get_foo_bar_display(self):\n\t\treturn \"something\"\nWhat I expect is that I should be able to override this function.\n", + "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -763,8 +763,12 @@ def contribute_to_class(self, cls, name, private_only=False):\n if not getattr(cls, self.attname, None):\n setattr(cls, self.attname, self.descriptor_class(self))\n if self.choices is not None:\n- setattr(cls, 'get_%s_display' % self.name,\n- partialmethod(cls._get_FIELD_display, field=self))\n+ if not hasattr(cls, 'get_%s_display' % self.name):\n+ setattr(\n+ cls,\n+ 'get_%s_display' % self.name,\n+ partialmethod(cls._get_FIELD_display, field=self),\n+ )\n \n def get_filter_kwargs_for_object(self, obj):\n \"\"\"\n", + "test_patch": "diff --git a/tests/model_fields/tests.py b/tests/model_fields/tests.py\n--- a/tests/model_fields/tests.py\n+++ b/tests/model_fields/tests.py\n@@ -168,6 +168,16 @@ def test_get_FIELD_display_translated(self):\n self.assertIsInstance(val, str)\n self.assertEqual(val, 'translated')\n \n+ def test_overriding_FIELD_display(self):\n+ class FooBar(models.Model):\n+ foo_bar = models.IntegerField(choices=[(1, 'foo'), (2, 'bar')])\n+\n+ def get_foo_bar_display(self):\n+ return 'something'\n+\n+ f = FooBar(foo_bar=1)\n+ self.assertEqual(f.get_foo_bar_display(), 'something')\n+\n def test_iterator_choices(self):\n \"\"\"\n get_choices() works with Iterators.\n", + "fail_to_pass": "[\"test_overriding_FIELD_display (model_fields.tests.GetFieldDisplayTests)\"]", + "pass_to_pass": "[\"test_blank_in_choices (model_fields.tests.GetChoicesTests)\", \"test_blank_in_grouped_choices (model_fields.tests.GetChoicesTests)\", \"test_empty_choices (model_fields.tests.GetChoicesTests)\", \"test_lazy_strings_not_evaluated (model_fields.tests.GetChoicesTests)\", \"test_check (model_fields.tests.ChoicesTests)\", \"test_choices (model_fields.tests.ChoicesTests)\", \"test_flatchoices (model_fields.tests.ChoicesTests)\", \"test_formfield (model_fields.tests.ChoicesTests)\", \"test_invalid_choice (model_fields.tests.ChoicesTests)\", \"Can supply a custom choices form class to Field.formfield()\", \"deconstruct() uses __qualname__ for nested class support.\", \"Field instances can be pickled.\", \"test_field_name (model_fields.tests.BasicFieldTests)\", \"Fields are ordered based on their creation.\", \"test_field_repr (model_fields.tests.BasicFieldTests)\", \"__repr__() uses __qualname__ for nested class support.\", \"test_field_str (model_fields.tests.BasicFieldTests)\", \"test_field_verbose_name (model_fields.tests.BasicFieldTests)\", \"Field.formfield() sets disabled for fields with choices.\", \"test_show_hidden_initial (model_fields.tests.BasicFieldTests)\", \"test_choices_and_field_display (model_fields.tests.GetFieldDisplayTests)\", \"test_empty_iterator_choices (model_fields.tests.GetFieldDisplayTests)\", \"A translated display value is coerced to str.\", \"test_iterator_choices (model_fields.tests.GetFieldDisplayTests)\", \"test_get_choices (model_fields.tests.GetChoicesLimitChoicesToTests)\", \"test_get_choices_reverse_related_field (model_fields.tests.GetChoicesLimitChoicesToTests)\", 
\"test_get_choices (model_fields.tests.GetChoicesOrderingTests)\", \"test_get_choices_default_ordering (model_fields.tests.GetChoicesOrderingTests)\", \"test_get_choices_reverse_related_field (model_fields.tests.GetChoicesOrderingTests)\", \"test_get_choices_reverse_related_field_default_ordering (model_fields.tests.GetChoicesOrderingTests)\"]", + "expected_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "test_file_spans": { + "tests/model_fields/tests.py": [ + "GetFieldDisplayTests.test_iterator_choices" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.set_attributes_from_name", + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.set_attributes_from_name", + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + 
"updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pytest-dev__pytest-6116", + "repo": "pytest-dev/pytest", + "base_commit": "e670ff76cbad80108bde9bab616b66771b8653cf", + "problem_statement": "pytest --collect-only needs a one char shortcut command\nI find myself needing to run `--collect-only` very often and that cli argument is a very long to type one. \r\n\r\nI do think that it would be great to allocate a character for it, not sure which one yet. Please use up/down thumbs to vote if you would find it useful or not and eventually proposing which char should be used. \r\n\r\nClearly this is a change very easy to implement but first I want to see if others would find it useful or not.\npytest --collect-only needs a one char shortcut command\nI find myself needing to run `--collect-only` very often and that cli argument is a very long to type one. \r\n\r\nI do think that it would be great to allocate a character for it, not sure which one yet. Please use up/down thumbs to vote if you would find it useful or not and eventually proposing which char should be used. 
\r\n\r\nClearly this is a change very easy to implement but first I want to see if others would find it useful or not.\n", + "golden_patch": "diff --git a/src/_pytest/main.py b/src/_pytest/main.py\n--- a/src/_pytest/main.py\n+++ b/src/_pytest/main.py\n@@ -109,6 +109,7 @@ def pytest_addoption(parser):\n group.addoption(\n \"--collectonly\",\n \"--collect-only\",\n+ \"--co\",\n action=\"store_true\",\n help=\"only collect tests, don't execute them.\",\n ),\n", + "test_patch": "diff --git a/testing/test_collection.py b/testing/test_collection.py\n--- a/testing/test_collection.py\n+++ b/testing/test_collection.py\n@@ -402,7 +402,7 @@ def pytest_collect_file(path, parent):\n )\n testdir.mkdir(\"sub\")\n testdir.makepyfile(\"def test_x(): pass\")\n- result = testdir.runpytest(\"--collect-only\")\n+ result = testdir.runpytest(\"--co\")\n result.stdout.fnmatch_lines([\"*MyModule*\", \"*test_x*\"])\n \n def test_pytest_collect_file_from_sister_dir(self, testdir):\n@@ -433,7 +433,7 @@ def pytest_collect_file(path, parent):\n p = testdir.makepyfile(\"def test_x(): pass\")\n p.copy(sub1.join(p.basename))\n p.copy(sub2.join(p.basename))\n- result = testdir.runpytest(\"--collect-only\")\n+ result = testdir.runpytest(\"--co\")\n result.stdout.fnmatch_lines([\"*MyModule1*\", \"*MyModule2*\", \"*test_x*\"])\n \n \n", + "fail_to_pass": "[\"testing/test_collection.py::TestCustomConftests::test_pytest_fs_collect_hooks_are_seen\", \"testing/test_collection.py::TestCustomConftests::test_pytest_collect_file_from_sister_dir\"]", + "pass_to_pass": "[\"testing/test_collection.py::TestCollector::test_collect_versus_item\", \"testing/test_collection.py::TestCollector::test_check_equality\", \"testing/test_collection.py::TestCollector::test_getparent\", \"testing/test_collection.py::TestCollector::test_getcustomfile_roundtrip\", \"testing/test_collection.py::TestCollector::test_can_skip_class_with_test_attr\", \"testing/test_collection.py::TestCollectFS::test_ignored_certain_directories\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs[activate]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs[activate.csh]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs[activate.fish]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs[Activate]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs[Activate.bat]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs[Activate.ps1]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs_norecursedirs_precedence[activate]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs_norecursedirs_precedence[activate.csh]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs_norecursedirs_precedence[activate.fish]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs_norecursedirs_precedence[Activate]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs_norecursedirs_precedence[Activate.bat]\", \"testing/test_collection.py::TestCollectFS::test_ignored_virtualenvs_norecursedirs_precedence[Activate.ps1]\", \"testing/test_collection.py::TestCollectFS::test__in_venv[activate]\", \"testing/test_collection.py::TestCollectFS::test__in_venv[activate.csh]\", \"testing/test_collection.py::TestCollectFS::test__in_venv[activate.fish]\", \"testing/test_collection.py::TestCollectFS::test__in_venv[Activate]\", \"testing/test_collection.py::TestCollectFS::test__in_venv[Activate.bat]\", 
\"testing/test_collection.py::TestCollectFS::test__in_venv[Activate.ps1]\", \"testing/test_collection.py::TestCollectFS::test_custom_norecursedirs\", \"testing/test_collection.py::TestCollectFS::test_testpaths_ini\", \"testing/test_collection.py::TestCollectPluginHookRelay::test_pytest_collect_file\", \"testing/test_collection.py::TestCollectPluginHookRelay::test_pytest_collect_directory\", \"testing/test_collection.py::TestPrunetraceback::test_custom_repr_failure\", \"testing/test_collection.py::TestCustomConftests::test_ignore_collect_path\", \"testing/test_collection.py::TestCustomConftests::test_ignore_collect_not_called_on_argument\", \"testing/test_collection.py::TestCustomConftests::test_collectignore_exclude_on_option\", \"testing/test_collection.py::TestCustomConftests::test_collectignoreglob_exclude_on_option\", \"testing/test_collection.py::TestSession::test_parsearg\", \"testing/test_collection.py::TestSession::test_collect_topdir\", \"testing/test_collection.py::TestSession::test_collect_protocol_single_function\", \"testing/test_collection.py::TestSession::test_collect_protocol_method\", \"testing/test_collection.py::TestSession::test_collect_custom_nodes_multi_id\", \"testing/test_collection.py::TestSession::test_collect_subdir_event_ordering\", \"testing/test_collection.py::TestSession::test_collect_two_commandline_args\", \"testing/test_collection.py::TestSession::test_serialization_byid\", \"testing/test_collection.py::TestSession::test_find_byid_without_instance_parents\", \"testing/test_collection.py::Test_getinitialnodes::test_global_file\", \"testing/test_collection.py::Test_getinitialnodes::test_pkgfile\", \"testing/test_collection.py::Test_genitems::test_check_collect_hashes\", \"testing/test_collection.py::Test_genitems::test_example_items1\", \"testing/test_collection.py::Test_genitems::test_class_and_functions_discovery_using_glob\", \"testing/test_collection.py::test_matchnodes_two_collections_same_file\", \"testing/test_collection.py::TestNodekeywords::test_no_under\", \"testing/test_collection.py::TestNodekeywords::test_issue345\", \"testing/test_collection.py::test_exit_on_collection_error\", \"testing/test_collection.py::test_exit_on_collection_with_maxfail_smaller_than_n_errors\", \"testing/test_collection.py::test_exit_on_collection_with_maxfail_bigger_than_n_errors\", \"testing/test_collection.py::test_continue_on_collection_errors\", \"testing/test_collection.py::test_continue_on_collection_errors_maxfail\", \"testing/test_collection.py::test_fixture_scope_sibling_conftests\", \"testing/test_collection.py::test_collect_init_tests\", \"testing/test_collection.py::test_collect_invalid_signature_message\", \"testing/test_collection.py::test_collect_handles_raising_on_dunder_class\", \"testing/test_collection.py::test_collect_with_chdir_during_import\", \"testing/test_collection.py::test_collect_symlink_file_arg\", \"testing/test_collection.py::test_collect_symlink_out_of_tree\", \"testing/test_collection.py::test_collectignore_via_conftest\", \"testing/test_collection.py::test_collect_pkg_init_and_file_in_args\", \"testing/test_collection.py::test_collect_pkg_init_only\", \"testing/test_collection.py::test_collect_sub_with_symlinks[True]\", \"testing/test_collection.py::test_collect_sub_with_symlinks[False]\", \"testing/test_collection.py::test_collector_respects_tbstyle\", \"testing/test_collection.py::test_collect_pyargs_with_testpaths\"]", + "expected_spans": { + "src/_pytest/main.py": [ + "pytest_addoption" + ] + }, + "test_file_spans": { + 
"testing/test_collection.py": [ + "TestCustomConftests.test_pytest_fs_collect_hooks_are_seen", + "TestCustomConftests.test_pytest_collect_file_from_sister_dir" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "scikit-learn__scikit-learn-15512", + "repo": "scikit-learn/scikit-learn", + "base_commit": "b8a4da8baa1137f173e7035f104067c7d2ffde22", + "problem_statement": "Return values of non converged affinity propagation clustering\nThe affinity propagation Documentation states: \r\n\"When the algorithm does not converge, it returns an empty array as cluster_center_indices and -1 as label for each training sample.\"\r\n\r\nExample:\r\n```python\r\nfrom sklearn.cluster import AffinityPropagation\r\nimport pandas as pd\r\n\r\ndata = pd.DataFrame([[1,0,0,0,0,0],[0,1,1,1,0,0],[0,0,1,0,0,1]])\r\naf = AffinityPropagation(affinity='euclidean', verbose=True, copy=False, max_iter=2).fit(data)\r\n\r\nprint(af.cluster_centers_indices_)\r\nprint(af.labels_)\r\n\r\n```\r\nI would expect that the clustering here (which does not converge) prints first an empty List and then [-1,-1,-1], however, I get [2] as cluster center and [0,0,0] as cluster labels. \r\nThe only way I currently know if the clustering fails is if I use the verbose option, however that is very unhandy. A hacky solution is to check if max_iter == n_iter_ but it could have converged exactly 15 iterations before max_iter (although unlikely).\r\nI am not sure if this is intended behavior and the documentation is wrong?\r\n\r\nFor my use-case within a bigger script, I would prefer to get back -1 values or have a property to check if it has converged, as otherwise, a user might not be aware that the clustering never converged.\r\n\r\n\r\n#### Versions\r\nSystem:\r\n python: 3.6.7 | packaged by conda-forge | (default, Nov 21 2018, 02:32:25) [GCC 4.8.2 20140120 (Red Hat 4.8.2-15)]\r\nexecutable: /home/jenniferh/Programs/anaconda3/envs/TF_RDKit_1_19/bin/python\r\n machine: Linux-4.15.0-52-generic-x86_64-with-debian-stretch-sid\r\nBLAS:\r\n macros: SCIPY_MKL_H=None, HAVE_CBLAS=None\r\n lib_dirs: /home/jenniferh/Programs/anaconda3/envs/TF_RDKit_1_19/lib\r\ncblas_libs: mkl_rt, pthread\r\nPython deps:\r\n pip: 18.1\r\n setuptools: 40.6.3\r\n sklearn: 0.20.3\r\n numpy: 1.15.4\r\n scipy: 1.2.0\r\n Cython: 0.29.2\r\n pandas: 0.23.4\r\n\r\n\n", + "golden_patch": "diff --git a/sklearn/cluster/_affinity_propagation.py b/sklearn/cluster/_affinity_propagation.py\n--- a/sklearn/cluster/_affinity_propagation.py\n+++ b/sklearn/cluster/_affinity_propagation.py\n@@ -194,17 +194,19 @@ def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,\n unconverged = (np.sum((se == convergence_iter) + (se == 0))\n != n_samples)\n if (not unconverged and (K > 0)) or (it == max_iter):\n+ never_converged = False\n if verbose:\n print(\"Converged after %d iterations.\" % it)\n break\n else:\n+ never_converged = True\n if verbose:\n print(\"Did not converge\")\n \n I = np.flatnonzero(E)\n K = I.size # Identify exemplars\n \n- if K > 0:\n+ if K > 0 and not never_converged:\n c = np.argmax(S[:, I], axis=1)\n c[I] = np.arange(K) # Identify clusters\n # Refine the final set of exemplars and clusters and return results\n@@ -408,6 +410,7 @@ def predict(self, X):\n Cluster labels.\n \"\"\"\n check_is_fitted(self)\n+ X = check_array(X)\n if not hasattr(self, \"cluster_centers_\"):\n raise ValueError(\"Predict method is not supported when \"\n \"affinity='precomputed'.\")\n", + "test_patch": "diff --git 
a/sklearn/cluster/tests/test_affinity_propagation.py b/sklearn/cluster/tests/test_affinity_propagation.py\n--- a/sklearn/cluster/tests/test_affinity_propagation.py\n+++ b/sklearn/cluster/tests/test_affinity_propagation.py\n@@ -152,6 +152,14 @@ def test_affinity_propagation_predict_non_convergence():\n assert_array_equal(np.array([-1, -1, -1]), y)\n \n \n+def test_affinity_propagation_non_convergence_regressiontest():\n+ X = np.array([[1, 0, 0, 0, 0, 0],\n+ [0, 1, 1, 1, 0, 0],\n+ [0, 0, 1, 0, 0, 1]])\n+ af = AffinityPropagation(affinity='euclidean', max_iter=2).fit(X)\n+ assert_array_equal(np.array([-1, -1, -1]), af.labels_)\n+\n+\n def test_equal_similarities_and_preferences():\n # Unequal distances\n X = np.array([[0, 0], [1, 1], [-2, -2]])\n", + "fail_to_pass": "[\"sklearn/cluster/tests/test_affinity_propagation.py::test_affinity_propagation_non_convergence_regressiontest\"]", + "pass_to_pass": "[\"sklearn/cluster/tests/test_affinity_propagation.py::test_affinity_propagation\", \"sklearn/cluster/tests/test_affinity_propagation.py::test_affinity_propagation_predict\", \"sklearn/cluster/tests/test_affinity_propagation.py::test_affinity_propagation_predict_error\", \"sklearn/cluster/tests/test_affinity_propagation.py::test_affinity_propagation_fit_non_convergence\", \"sklearn/cluster/tests/test_affinity_propagation.py::test_affinity_propagation_equal_mutual_similarities\", \"sklearn/cluster/tests/test_affinity_propagation.py::test_affinity_propagation_predict_non_convergence\", \"sklearn/cluster/tests/test_affinity_propagation.py::test_equal_similarities_and_preferences\", \"sklearn/cluster/tests/test_affinity_propagation.py::test_affinity_propagation_convergence_warning_dense_sparse[centers0]\", \"sklearn/cluster/tests/test_affinity_propagation.py::test_affinity_propagation_convergence_warning_dense_sparse[centers1]\"]", + "expected_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.predict" + ] + }, + "test_file_spans": { + "sklearn/cluster/tests/test_affinity_propagation.py": [ + "test_equal_similarities_and_preferences" + ] + }, + "resolved_by": [ + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation" + ] + }, + "alternative_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation", + "AffinityPropagation.fit", + "AffinityPropagation.predict" + ] + }, + "alternative_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation", + "AffinityPropagation.fit", + "AffinityPropagation.predict" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation", + "AffinityPropagation.fit_predict" + ] + }, + "alternative_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation", + "AffinityPropagation.fit_predict" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation" + ] + }, + "alternative_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": 
{ + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.fit" + ], + "sklearn/cluster/tests/test_affinity_propagation.py": [] + }, + "alternative_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.fit" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.fit" + ] + }, + "alternative_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.fit" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.fit" + ], + "sklearn/cluster/tests/test_affinity_propagation.py": [ + "test_affinity_propagation_fit_non_convergence", + "test_affinity_propagation_predict_non_convergence" + ] + }, + "alternative_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.fit" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation" + ] + }, + "alternative_spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation", + "AffinityPropagation.fit_predict" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation" + ] + } + }, + { + "run_name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.fit" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.fit" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation", + "AffinityPropagation.fit" + ] + } + }, + { + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "sklearn/cluster/_affinity_propagation.py": [ + "affinity_propagation" + ] + } + } + ] + }, + { + "instance_id": "scikit-learn__scikit-learn-15535", + "repo": "scikit-learn/scikit-learn", + "base_commit": "70b0ddea992c01df1a41588fa9e2d130fb6b13f8", + "problem_statement": "regression in input validation of clustering metrics\n```python\r\nfrom sklearn.metrics.cluster import mutual_info_score\r\nimport numpy as np\r\n\r\nx = np.random.choice(['a', 'b'], size=20).astype(object)\r\nmutual_info_score(x, x)\r\n```\r\nValueError: could not convert string to float: 'b'\r\n\r\nwhile\r\n```python\r\nx = np.random.choice(['a', 'b'], size=20)\r\nmutual_info_score(x, x)\r\n```\r\nworks with a warning?\r\n\r\nthis worked in 0.21.1 without a warning (as I think it should)\r\n\r\n\r\nEdit by @ogrisel: I removed the `.astype(object)` in the second code snippet.\n", + "golden_patch": "diff --git a/sklearn/metrics/cluster/_supervised.py b/sklearn/metrics/cluster/_supervised.py\n--- 
a/sklearn/metrics/cluster/_supervised.py\n+++ b/sklearn/metrics/cluster/_supervised.py\n@@ -43,10 +43,10 @@ def check_clusterings(labels_true, labels_pred):\n The predicted labels.\n \"\"\"\n labels_true = check_array(\n- labels_true, ensure_2d=False, ensure_min_samples=0\n+ labels_true, ensure_2d=False, ensure_min_samples=0, dtype=None,\n )\n labels_pred = check_array(\n- labels_pred, ensure_2d=False, ensure_min_samples=0\n+ labels_pred, ensure_2d=False, ensure_min_samples=0, dtype=None,\n )\n \n # input checks\n", + "test_patch": "diff --git a/sklearn/metrics/cluster/tests/test_common.py b/sklearn/metrics/cluster/tests/test_common.py\n--- a/sklearn/metrics/cluster/tests/test_common.py\n+++ b/sklearn/metrics/cluster/tests/test_common.py\n@@ -161,7 +161,9 @@ def generate_formats(y):\n y = np.array(y)\n yield y, 'array of ints'\n yield y.tolist(), 'list of ints'\n- yield [str(x) for x in y.tolist()], 'list of strs'\n+ yield [str(x) + \"-a\" for x in y.tolist()], 'list of strs'\n+ yield (np.array([str(x) + \"-a\" for x in y.tolist()], dtype=object),\n+ 'array of strs')\n yield y - 1, 'including negative ints'\n yield y + 1, 'strictly positive ints'\n \n", + "fail_to_pass": "[\"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[adjusted_mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[adjusted_rand_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[completeness_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[homogeneity_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[normalized_mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[v_measure_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[fowlkes_mallows_score]\"]", + "pass_to_pass": "[\"sklearn/metrics/cluster/tests/test_common.py::test_symmetric_non_symmetric_union\", \"sklearn/metrics/cluster/tests/test_common.py::test_symmetry[adjusted_rand_score-y10-y20]\", \"sklearn/metrics/cluster/tests/test_common.py::test_symmetry[v_measure_score-y11-y21]\", \"sklearn/metrics/cluster/tests/test_common.py::test_symmetry[mutual_info_score-y12-y22]\", \"sklearn/metrics/cluster/tests/test_common.py::test_symmetry[adjusted_mutual_info_score-y13-y23]\", \"sklearn/metrics/cluster/tests/test_common.py::test_symmetry[normalized_mutual_info_score-y14-y24]\", \"sklearn/metrics/cluster/tests/test_common.py::test_symmetry[fowlkes_mallows_score-y15-y25]\", \"sklearn/metrics/cluster/tests/test_common.py::test_non_symmetry[homogeneity_score-y10-y20]\", \"sklearn/metrics/cluster/tests/test_common.py::test_non_symmetry[completeness_score-y11-y21]\", \"sklearn/metrics/cluster/tests/test_common.py::test_normalized_output[adjusted_rand_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_normalized_output[homogeneity_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_normalized_output[completeness_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_normalized_output[v_measure_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_normalized_output[adjusted_mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_normalized_output[fowlkes_mallows_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_normalized_output[normalized_mutual_info_score]\", 
\"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[adjusted_mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[adjusted_rand_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[completeness_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[homogeneity_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[normalized_mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[v_measure_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[fowlkes_mallows_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[silhouette_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[silhouette_manhattan]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[calinski_harabasz_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_permute_labels[davies_bouldin_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[silhouette_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[silhouette_manhattan]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[calinski_harabasz_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_format_invariance[davies_bouldin_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_single_sample[adjusted_mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_single_sample[adjusted_rand_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_single_sample[completeness_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_single_sample[homogeneity_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_single_sample[mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_single_sample[normalized_mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_single_sample[v_measure_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_single_sample[fowlkes_mallows_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[adjusted_mutual_info_score-adjusted_mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[adjusted_rand_score-adjusted_rand_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[completeness_score-completeness_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[homogeneity_score-homogeneity_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[mutual_info_score-mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[normalized_mutual_info_score-normalized_mutual_info_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[v_measure_score-v_measure_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[fowlkes_mallows_score-fowlkes_mallows_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[silhouette_score-silhouette_score]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[silhouette_manhattan-metric_func9]\", \"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[calinski_harabasz_score-calinski_harabasz_score]\", 
\"sklearn/metrics/cluster/tests/test_common.py::test_inf_nan_input[davies_bouldin_score-davies_bouldin_score]\"]", + "expected_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "test_file_spans": { + "sklearn/metrics/cluster/tests/test_common.py": [ + "test_format_invariance" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "sklearn/utils/validation.py": [ + "check_array" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings", + "contingency_matrix", + "mutual_info_score" + ], + "sklearn/utils/validation.py": [ + "check_array" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings", + "contingency_matrix", + "mutual_info_score" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "imports", + "check_clusterings", + "mutual_info_score" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "imports", + "check_clusterings", + "mutual_info_score" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "docstring", + "imports", + "_comb2", + "check_clusterings", + "_generalized_average", + "contingency_matrix", + "adjusted_rand_score", + "homogeneity_completeness_v_measure", + "v_measure_score", + "mutual_info_score", + "adjusted_mutual_info_score", + "normalized_mutual_info_score", + "fowlkes_mallows_score", + "entropy" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "docstring", + "imports", + "_comb2", + "check_clusterings", + "_generalized_average", + "contingency_matrix", + "adjusted_rand_score", + "homogeneity_completeness_v_measure", + "v_measure_score", + "mutual_info_score", + "adjusted_mutual_info_score", + "normalized_mutual_info_score", + "fowlkes_mallows_score", + "entropy" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "_comb2", + "check_clusterings", + "adjusted_rand_score", + "homogeneity_score", + "completeness_score", + "v_measure_score", + "mutual_info_score", + "adjusted_mutual_info_score", + "normalized_mutual_info_score", + "fowlkes_mallows_score" + ], + "sklearn/metrics/cluster/tests/test_supervised.py": [] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "_comb2", + "check_clusterings", + "adjusted_rand_score", + "homogeneity_score", + "completeness_score", + "v_measure_score", + "mutual_info_score", + "adjusted_mutual_info_score", + "normalized_mutual_info_score", + "fowlkes_mallows_score" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": 
"20240829_Isoform", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ], + "sklearn/utils/validation.py": [ + "check_array" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ], + "sklearn/utils/validation.py": [ + "check_array" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ], + "sklearn/utils/validation.py": [ + "check_array" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ], + "sklearn/metrics/cluster/tests/test_supervised.py": [ + "test_contingency_matrix" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "docstring", + "imports", + "check_clusterings", + "contingency_matrix", + "adjusted_rand_score", + "mutual_info_score" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "docstring", + "imports", + "check_clusterings", + "contingency_matrix", + "adjusted_rand_score", + "mutual_info_score" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings", + "adjusted_rand_score", + "mutual_info_score" + ] + }, + "alternative_spans": { + "sklearn/metrics/cluster/_supervised.py": [ + "check_clusterings", + "adjusted_rand_score", + "mutual_info_score" + ] + } + } + ], + "alternative_spans": 
[] + }, + { + "instance_id": "django__django-12113", + "repo": "django/django", + "base_commit": "62254c5202e80a68f4fe6572a2be46a3d953de1a", + "problem_statement": "admin_views.test_multidb fails with persistent test SQLite database.\nDescription\n\t \n\t\t(last modified by Mariusz Felisiak)\n\t \nI've tried using persistent SQLite databases for the tests (to make use of\n--keepdb), but at least some test fails with:\nsqlite3.OperationalError: database is locked\nThis is not an issue when only using TEST[\"NAME\"] with \"default\" (which is good enough in terms of performance).\ndiff --git i/tests/test_sqlite.py w/tests/test_sqlite.py\nindex f1b65f7d01..9ce4e32e14 100644\n--- i/tests/test_sqlite.py\n+++ w/tests/test_sqlite.py\n@@ -15,9 +15,15 @@\n DATABASES = {\n\t 'default': {\n\t\t 'ENGINE': 'django.db.backends.sqlite3',\n+\t\t'TEST': {\n+\t\t\t'NAME': 'test_default.sqlite3'\n+\t\t},\n\t },\n\t 'other': {\n\t\t 'ENGINE': 'django.db.backends.sqlite3',\n+\t\t'TEST': {\n+\t\t\t'NAME': 'test_other.sqlite3'\n+\t\t},\n\t }\n }\n% tests/runtests.py admin_views.test_multidb -v 3 --keepdb --parallel 1\n\u2026\nOperations to perform:\n Synchronize unmigrated apps: admin_views, auth, contenttypes, messages, sessions, staticfiles\n Apply all migrations: admin, sites\nRunning pre-migrate handlers for application contenttypes\nRunning pre-migrate handlers for application auth\nRunning pre-migrate handlers for application sites\nRunning pre-migrate handlers for application sessions\nRunning pre-migrate handlers for application admin\nRunning pre-migrate handlers for application admin_views\nSynchronizing apps without migrations:\n Creating tables...\n\tRunning deferred SQL...\nRunning migrations:\n No migrations to apply.\nRunning post-migrate handlers for application contenttypes\nRunning post-migrate handlers for application auth\nRunning post-migrate handlers for application sites\nRunning post-migrate handlers for application sessions\nRunning post-migrate handlers for application admin\nRunning post-migrate handlers for application admin_views\nSystem check identified no issues (0 silenced).\nERROR\n======================================================================\nERROR: setUpClass (admin_views.test_multidb.MultiDatabaseTests)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 84, in _execute\n\treturn self.cursor.execute(sql, params)\n File \"\u2026/Vcs/django/django/db/backends/sqlite3/base.py\", line 391, in execute\n\treturn Database.Cursor.execute(self, query, params)\nsqlite3.OperationalError: database is locked\nThe above exception was the direct cause of the following exception:\nTraceback (most recent call last):\n File \"\u2026/Vcs/django/django/test/testcases.py\", line 1137, in setUpClass\n\tcls.setUpTestData()\n File \"\u2026/Vcs/django/tests/admin_views/test_multidb.py\", line 40, in setUpTestData\n\tusername='admin', password='something', email='test@test.org',\n File \"\u2026/Vcs/django/django/contrib/auth/models.py\", line 158, in create_superuser\n\treturn self._create_user(username, email, password, **extra_fields)\n File \"\u2026/Vcs/django/django/contrib/auth/models.py\", line 141, in _create_user\n\tuser.save(using=self._db)\n File \"\u2026/Vcs/django/django/contrib/auth/base_user.py\", line 66, in save\n\tsuper().save(*args, **kwargs)\n File \"\u2026/Vcs/django/django/db/models/base.py\", line 741, in save\n\tforce_update=force_update, 
update_fields=update_fields)\n File \"\u2026/Vcs/django/django/db/models/base.py\", line 779, in save_base\n\tforce_update, using, update_fields,\n File \"\u2026/Vcs/django/django/db/models/base.py\", line 870, in _save_table\n\tresult = self._do_insert(cls._base_manager, using, fields, update_pk, raw)\n File \"\u2026/Vcs/django/django/db/models/base.py\", line 908, in _do_insert\n\tusing=using, raw=raw)\n File \"\u2026/Vcs/django/django/db/models/manager.py\", line 82, in manager_method\n\treturn getattr(self.get_queryset(), name)(*args, **kwargs)\n File \"\u2026/Vcs/django/django/db/models/query.py\", line 1175, in _insert\n\treturn query.get_compiler(using=using).execute_sql(return_id)\n File \"\u2026/Vcs/django/django/db/models/sql/compiler.py\", line 1321, in execute_sql\n\tcursor.execute(sql, params)\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 67, in execute\n\treturn self._execute_with_wrappers(sql, params, many=False, executor=self._execute)\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 76, in _execute_with_wrappers\n\treturn executor(sql, params, many, context)\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 84, in _execute\n\treturn self.cursor.execute(sql, params)\n File \"\u2026/Vcs/django/django/db/utils.py\", line 89, in __exit__\n\traise dj_exc_value.with_traceback(traceback) from exc_value\n File \"\u2026/Vcs/django/django/db/backends/utils.py\", line 84, in _execute\n\treturn self.cursor.execute(sql, params)\n File \"\u2026/Vcs/django/django/db/backends/sqlite3/base.py\", line 391, in execute\n\treturn Database.Cursor.execute(self, query, params)\ndjango.db.utils.OperationalError: database is locked\n", + "golden_patch": "diff --git a/django/db/backends/sqlite3/creation.py b/django/db/backends/sqlite3/creation.py\n--- a/django/db/backends/sqlite3/creation.py\n+++ b/django/db/backends/sqlite3/creation.py\n@@ -98,4 +98,6 @@ def test_db_signature(self):\n sig = [self.connection.settings_dict['NAME']]\n if self.is_in_memory_db(test_database_name):\n sig.append(self.connection.alias)\n+ else:\n+ sig.append(test_database_name)\n return tuple(sig)\n", + "test_patch": "diff --git a/tests/backends/sqlite/test_creation.py b/tests/backends/sqlite/test_creation.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/backends/sqlite/test_creation.py\n@@ -0,0 +1,18 @@\n+import copy\n+import unittest\n+\n+from django.db import connection\n+from django.test import SimpleTestCase\n+\n+\n+@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite tests')\n+class TestDbSignatureTests(SimpleTestCase):\n+ def test_custom_test_name(self):\n+ saved_settings = copy.deepcopy(connection.settings_dict)\n+ try:\n+ connection.settings_dict['NAME'] = None\n+ connection.settings_dict['TEST']['NAME'] = 'custom.sqlite.db'\n+ signature = connection.creation.test_db_signature()\n+ self.assertEqual(signature, (None, 'custom.sqlite.db'))\n+ finally:\n+ connection.settings_dict = saved_settings\n", + "fail_to_pass": "[\"test_custom_test_name (backends.sqlite.test_creation.TestDbSignatureTests)\"]", + "pass_to_pass": "[]", + "expected_spans": { + "django/db/backends/sqlite3/creation.py": [ + "DatabaseCreation", + "DatabaseCreation.test_db_signature" + ] + }, + "test_file_spans": {}, + "resolved_by": [ + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/backends/sqlite3/creation.py": [ + "DatabaseCreation._create_test_db", + "DatabaseCreation._destroy_test_db", + "DatabaseCreation", + "DatabaseCreation.test_db_signature" + ] + }, + 
"alternative_spans": { + "django/db/backends/sqlite3/creation.py": [ + "DatabaseCreation._create_test_db", + "DatabaseCreation._destroy_test_db", + "DatabaseCreation", + "DatabaseCreation.test_db_signature" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12125", + "repo": "django/django", + "base_commit": "89d41cba392b759732ba9f1db4ff29ed47da6a56", + "problem_statement": "makemigrations produces incorrect path for inner classes\nDescription\n\t\nWhen you define a subclass from django.db.models.Field as an inner class of some other class, and use this field inside a django.db.models.Model class, then when you run manage.py makemigrations, a migrations file is created which refers to the inner class as if it were a top-level class of the module it is in.\nTo reproduce, create the following as your model:\nclass Outer(object):\n\tclass Inner(models.CharField):\n\t\tpass\nclass A(models.Model):\n\tfield = Outer.Inner(max_length=20)\nAfter running manage.py makemigrations, the generated migrations file contains the following:\nmigrations.CreateModel(\n\tname='A',\n\tfields=[\n\t\t('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n\t\t('field', test1.models.Inner(max_length=20)),\n\t],\n),\nNote the test1.models.Inner, which should have been test1.models.Outer.Inner.\nThe real life case involved an EnumField from django-enumfields, defined as an inner class of a Django Model class, similar to this:\nimport enum\nfrom enumfields import Enum, EnumField\nclass Thing(models.Model):\n\t@enum.unique\n\tclass State(Enum):\n\t\ton = 'on'\n\t\toff = 'off'\n\tstate = EnumField(enum=State)\nThis results in the following migrations code:\nmigrations.CreateModel(\n\tname='Thing',\n\tfields=[\n\t\t('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n\t\t('state', enumfields.fields.EnumField(enum=test1.models.State, max_length=10)),\n\t],\n),\nThis refers to test1.models.State, instead of to test1.models.Thing.State.\n", + "golden_patch": "diff --git a/django/db/migrations/serializer.py b/django/db/migrations/serializer.py\n--- a/django/db/migrations/serializer.py\n+++ b/django/db/migrations/serializer.py\n@@ -269,7 +269,7 @@ def serialize(self):\n if module == builtins.__name__:\n return self.value.__name__, set()\n else:\n- return \"%s.%s\" % (module, self.value.__name__), {\"import %s\" % module}\n+ return \"%s.%s\" % (module, self.value.__qualname__), {\"import %s\" % module}\n \n \n class UUIDSerializer(BaseSerializer):\n", + "test_patch": "diff --git a/tests/migrations/test_writer.py b/tests/migrations/test_writer.py\n--- a/tests/migrations/test_writer.py\n+++ b/tests/migrations/test_writer.py\n@@ -26,6 +26,11 @@\n from .models import FoodManager, FoodQuerySet\n \n \n+class DeconstructibleInstances:\n+ def deconstruct(self):\n+ return ('DeconstructibleInstances', [], {})\n+\n+\n class Money(decimal.Decimal):\n def deconstruct(self):\n return (\n@@ -188,6 +193,10 @@ class NestedEnum(enum.IntEnum):\n A = 1\n B = 2\n \n+ class NestedChoices(models.TextChoices):\n+ X = 'X', 'X value'\n+ Y = 'Y', 'Y value'\n+\n def safe_exec(self, string, value=None):\n d = {}\n try:\n@@ -383,6 +392,18 @@ class DateChoices(datetime.date, models.Choices):\n \"default=datetime.date(1969, 11, 19))\"\n )\n \n+ def test_serialize_nested_class(self):\n+ for nested_cls in [self.NestedEnum, self.NestedChoices]:\n+ cls_name = nested_cls.__name__\n+ with self.subTest(cls_name):\n+ 
self.assertSerializedResultEqual(\n+ nested_cls,\n+ (\n+ \"migrations.test_writer.WriterTests.%s\" % cls_name,\n+ {'import migrations.test_writer'},\n+ ),\n+ )\n+\n def test_serialize_uuid(self):\n self.assertSerializedEqual(uuid.uuid1())\n self.assertSerializedEqual(uuid.uuid4())\n@@ -726,10 +747,6 @@ def test_deconstruct_class_arguments(self):\n # Yes, it doesn't make sense to use a class as a default for a\n # CharField. It does make sense for custom fields though, for example\n # an enumfield that takes the enum class as an argument.\n- class DeconstructibleInstances:\n- def deconstruct(self):\n- return ('DeconstructibleInstances', [], {})\n-\n string = MigrationWriter.serialize(models.CharField(default=DeconstructibleInstances))[0]\n self.assertEqual(string, \"models.CharField(default=migrations.test_writer.DeconstructibleInstances)\")\n \n", + "fail_to_pass": "[\"test_serialize_nested_class (migrations.test_writer.WriterTests)\", \"test_serialize_numbers (migrations.test_writer.WriterTests)\"]", + "pass_to_pass": "[\"test_args_kwargs_signature (migrations.test_writer.OperationWriterTests)\", \"test_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_empty_signature (migrations.test_writer.OperationWriterTests)\", \"test_expand_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_kwargs_signature (migrations.test_writer.OperationWriterTests)\", \"test_multiline_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_nested_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_nested_operation_expand_args_signature (migrations.test_writer.OperationWriterTests)\", \"test_custom_operation (migrations.test_writer.WriterTests)\", \"test_deconstruct_class_arguments (migrations.test_writer.WriterTests)\", \"test_migration_file_header_comments (migrations.test_writer.WriterTests)\", \"test_migration_path (migrations.test_writer.WriterTests)\", \"test_models_import_omitted (migrations.test_writer.WriterTests)\", \"test_register_non_serializer (migrations.test_writer.WriterTests)\", \"test_register_serializer (migrations.test_writer.WriterTests)\", \"test_serialize_builtin_types (migrations.test_writer.WriterTests)\", \"test_serialize_builtins (migrations.test_writer.WriterTests)\", \"test_serialize_choices (migrations.test_writer.WriterTests)\", \"test_serialize_class_based_validators (migrations.test_writer.WriterTests)\", \"test_serialize_collections (migrations.test_writer.WriterTests)\", \"test_serialize_compiled_regex (migrations.test_writer.WriterTests)\", \"test_serialize_constants (migrations.test_writer.WriterTests)\", \"test_serialize_datetime (migrations.test_writer.WriterTests)\", \"test_serialize_empty_nonempty_tuple (migrations.test_writer.WriterTests)\", \"test_serialize_enums (migrations.test_writer.WriterTests)\", \"test_serialize_fields (migrations.test_writer.WriterTests)\", \"test_serialize_frozensets (migrations.test_writer.WriterTests)\", \"test_serialize_functions (migrations.test_writer.WriterTests)\", \"test_serialize_functools_partial (migrations.test_writer.WriterTests)\", \"test_serialize_functools_partialmethod (migrations.test_writer.WriterTests)\", \"test_serialize_iterators (migrations.test_writer.WriterTests)\", \"test_serialize_lazy_objects (migrations.test_writer.WriterTests)\", \"A reference in a local scope can't be serialized.\", \"test_serialize_managers (migrations.test_writer.WriterTests)\", \"test_serialize_multiline_strings (migrations.test_writer.WriterTests)\", 
\"test_serialize_range (migrations.test_writer.WriterTests)\", \"test_serialize_set (migrations.test_writer.WriterTests)\", \"test_serialize_settings (migrations.test_writer.WriterTests)\", \"test_serialize_strings (migrations.test_writer.WriterTests)\", \"test_serialize_timedelta (migrations.test_writer.WriterTests)\", \"test_serialize_type_none (migrations.test_writer.WriterTests)\", \"An unbound method used within a class body can be serialized.\", \"test_serialize_uuid (migrations.test_writer.WriterTests)\", \"test_simple_migration (migrations.test_writer.WriterTests)\", \"test_sorted_imports (migrations.test_writer.WriterTests)\"]", + "expected_spans": { + "django/db/migrations/serializer.py": [ + "TypeSerializer", + "TypeSerializer.serialize" + ] + }, + "test_file_spans": { + "tests/migrations/test_writer.py": [ + "Money", + "WriterTests.safe_exec", + "WriterTests.test_serialize_uuid", + "WriterTests.test_deconstruct_class_arguments" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/migrations/writer.py": [ + "MigrationWriter.serialize" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "TypeSerializer", + "TypeSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "TypeSerializer", + "TypeSerializer.serialize" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "TypeSerializer.serialize", + "TypeSerializer" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "TypeSerializer.serialize", + "TypeSerializer" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/db/migrations/serializer.py": [ + "TypeSerializer", + "TypeSerializer.serialize" + ] + }, + "alternative_spans": { + "django/db/migrations/serializer.py": [ + "TypeSerializer", + "TypeSerializer.serialize" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12184", + "repo": "django/django", + "base_commit": "5d674eac871a306405b0fbbaeb17bbeba9c68bf3", + "problem_statement": "Optional URL params crash some view functions.\nDescription\n\t\nMy use case, running fine with Django until 2.2:\nURLConf:\nurlpatterns += [\n\t...\n\tre_path(r'^module/(?P(html|json|xml))?/?$', views.modules, name='modules'),\n]\nView:\ndef modules(request, format='html'):\n\t...\n\treturn render(...)\nWith Django 3.0, this is now producing an error:\nTraceback (most recent call last):\n File \"/l10n/venv/lib/python3.6/site-packages/django/core/handlers/exception.py\", line 34, in inner\n\tresponse = get_response(request)\n File \"/l10n/venv/lib/python3.6/site-packages/django/core/handlers/base.py\", line 115, in _get_response\n\tresponse = self.process_exception_by_middleware(e, request)\n File \"/l10n/venv/lib/python3.6/site-packages/django/core/handlers/base.py\", line 113, in _get_response\n\tresponse = wrapped_callback(request, *callback_args, **callback_kwargs)\nException Type: TypeError at /module/\nException Value: modules() takes from 1 to 2 positional arguments but 3 were given\n", + "golden_patch": "diff --git a/django/urls/resolvers.py b/django/urls/resolvers.py\n--- a/django/urls/resolvers.py\n+++ b/django/urls/resolvers.py\n@@ -158,8 +158,9 @@ def match(self, path):\n # If there are any named groups, use those as kwargs, ignoring\n # non-named groups. 
Otherwise, pass all non-named arguments as\n # positional arguments.\n- kwargs = {k: v for k, v in match.groupdict().items() if v is not None}\n+ kwargs = match.groupdict()\n args = () if kwargs else match.groups()\n+ kwargs = {k: v for k, v in kwargs.items() if v is not None}\n return path[match.end():], args, kwargs\n return None\n \n", + "test_patch": "diff --git a/tests/urlpatterns/path_urls.py b/tests/urlpatterns/path_urls.py\n--- a/tests/urlpatterns/path_urls.py\n+++ b/tests/urlpatterns/path_urls.py\n@@ -12,6 +12,11 @@\n path('included_urls/', include('urlpatterns.included_urls')),\n re_path(r'^regex/(?P[0-9]+)/$', views.empty_view, name='regex'),\n re_path(r'^regex_optional/(?P\\d+)/(?:(?P\\d+)/)?', views.empty_view, name='regex_optional'),\n+ re_path(\n+ r'^regex_only_optional/(?:(?P\\d+)/)?',\n+ views.empty_view,\n+ name='regex_only_optional',\n+ ),\n path('', include('urlpatterns.more_urls')),\n path('//', views.empty_view, name='lang-and-path'),\n ]\ndiff --git a/tests/urlpatterns/tests.py b/tests/urlpatterns/tests.py\n--- a/tests/urlpatterns/tests.py\n+++ b/tests/urlpatterns/tests.py\n@@ -68,6 +68,16 @@ def test_re_path_with_optional_parameter(self):\n r'^regex_optional/(?P\\d+)/(?:(?P\\d+)/)?',\n )\n \n+ def test_re_path_with_missing_optional_parameter(self):\n+ match = resolve('/regex_only_optional/')\n+ self.assertEqual(match.url_name, 'regex_only_optional')\n+ self.assertEqual(match.kwargs, {})\n+ self.assertEqual(match.args, ())\n+ self.assertEqual(\n+ match.route,\n+ r'^regex_only_optional/(?:(?P\\d+)/)?',\n+ )\n+\n def test_path_lookup_with_inclusion(self):\n match = resolve('/included_urls/extra/something/')\n self.assertEqual(match.url_name, 'inner-extra')\n", + "fail_to_pass": "[\"test_re_path_with_missing_optional_parameter (urlpatterns.tests.SimplifiedURLTests)\"]", + "pass_to_pass": "[\"test_allows_non_ascii_but_valid_identifiers (urlpatterns.tests.ParameterRestrictionTests)\", \"test_non_identifier_parameter_name_causes_exception (urlpatterns.tests.ParameterRestrictionTests)\", \"test_matching_urls (urlpatterns.tests.ConverterTests)\", \"test_nonmatching_urls (urlpatterns.tests.ConverterTests)\", \"test_resolve_type_error_propagates (urlpatterns.tests.ConversionExceptionTests)\", \"test_resolve_value_error_means_no_match (urlpatterns.tests.ConversionExceptionTests)\", \"test_reverse_value_error_propagates (urlpatterns.tests.ConversionExceptionTests)\", \"test_converter_resolve (urlpatterns.tests.SimplifiedURLTests)\", \"test_converter_reverse (urlpatterns.tests.SimplifiedURLTests)\", \"test_converter_reverse_with_second_layer_instance_namespace (urlpatterns.tests.SimplifiedURLTests)\", \"test_invalid_converter (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_inclusion_is_matchable (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_inclusion_is_reversible (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_lookup_with_double_inclusion (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_lookup_with_empty_string_inclusion (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_lookup_with_inclusion (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_lookup_with_multiple_parameters (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_lookup_with_typed_parameters (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_lookup_without_parameters (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_reverse_with_parameter (urlpatterns.tests.SimplifiedURLTests)\", \"test_path_reverse_without_parameter (urlpatterns.tests.SimplifiedURLTests)\", \"test_re_path 
(urlpatterns.tests.SimplifiedURLTests)\", \"test_re_path_with_optional_parameter (urlpatterns.tests.SimplifiedURLTests)\", \"test_space_in_route (urlpatterns.tests.SimplifiedURLTests)\", \"test_two_variable_at_start_of_path_pattern (urlpatterns.tests.SimplifiedURLTests)\"]", + "expected_spans": { + "django/urls/resolvers.py": [ + "RegexPattern.match" + ] + }, + "test_file_spans": { + "tests/urlpatterns/path_urls.py": [ + "imports" + ], + "tests/urlpatterns/tests.py": [ + "SimplifiedURLTests.test_path_lookup_with_inclusion" + ] + }, + "resolved_by": [ + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/urls/resolvers.py": [ + "imports", + "ResolverMatch.__init__", + "ResolverMatch", + "ResolverMatch.__repr__", + "_get_cached_resolver", + "get_ns_resolver", + "LocaleRegexDescriptor", + "LocaleRegexDescriptor.__get__", + "CheckURLMixin", + "CheckURLMixin._check_pattern_startswith_slash", + "RegexPattern.match", + "RegexPattern._check_include_trailing_dollar", + "RegexPattern._compile", + "RegexPattern.__str__", + "impl", + "_route_to_regex", + "RoutePattern.match", + "RoutePattern.check", + "LocalePrefixPattern.__init__", + "LocalePrefixPattern.regex", + "LocalePrefixPattern.language_prefix", + "URLPattern.__init__", + "URLPattern._check_pattern_name", + "URLPattern.resolve", + "URLPattern.lookup_str", + "URLResolver.__init__", + "URLResolver.__repr__", + "URLResolver._check_custom_error_handlers", + "URLResolver._populate", + "URLResolver.resolve", + "URLResolver.url_patterns", + "URLResolver.resolve_error_handler", + "URLResolver._reverse_with_prefix" + ] + }, + "alternative_spans": { + "django/urls/resolvers.py": [ + "imports", + "ResolverMatch.__init__", + "ResolverMatch", + "ResolverMatch.__repr__", + "_get_cached_resolver", + "get_ns_resolver", + "LocaleRegexDescriptor", + "LocaleRegexDescriptor.__get__", + "CheckURLMixin", + "CheckURLMixin._check_pattern_startswith_slash", + "RegexPattern.match", + "RegexPattern._check_include_trailing_dollar", + "RegexPattern._compile", + "RegexPattern.__str__", + "impl", + "_route_to_regex", + "RoutePattern.match", + "RoutePattern.check", + "LocalePrefixPattern.__init__", + "LocalePrefixPattern.regex", + "LocalePrefixPattern.language_prefix", + "URLPattern.__init__", + "URLPattern._check_pattern_name", + "URLPattern.resolve", + "URLPattern.lookup_str", + "URLResolver.__init__", + "URLResolver.__repr__", + "URLResolver._check_custom_error_handlers", + "URLResolver._populate", + "URLResolver.resolve", + "URLResolver.url_patterns", + "URLResolver.resolve_error_handler", + "URLResolver._reverse_with_prefix" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/urls/resolvers.py": [ + "RegexPattern.match" + ] + }, + "alternative_spans": { + "django/urls/resolvers.py": [ + "RegexPattern.match" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/urls/resolvers.py": [ + "URLResolver.resolve" + ] + }, + "alternative_spans": { + "django/urls/resolvers.py": [ + "URLResolver.resolve" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240721_amazon-q-developer-agent-20240719-dev", + "spans": { + "django/urls/resolvers.py": [ + "URLResolver.resolve" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-18057", + "repo": "sympy/sympy", + "base_commit": "62000f37b8821573ba00280524ffb4ac4a380875", + "problem_statement": "Sympy incorrectly attempts to eval reprs in its __eq__ method\nPassing strings produced by unknown objects 
into eval is **very bad**. It is especially surprising for an equality check to trigger that kind of behavior. This should be fixed ASAP.\r\n\r\nRepro code:\r\n\r\n```\r\nimport sympy\r\nclass C:\r\n def __repr__(self):\r\n return 'x.y'\r\n_ = sympy.Symbol('x') == C()\r\n```\r\n\r\nResults in:\r\n\r\n```\r\nE AttributeError: 'Symbol' object has no attribute 'y'\r\n```\r\n\r\nOn the line:\r\n\r\n```\r\n expr = eval(\r\n code, global_dict, local_dict) # take local objects in preference\r\n```\r\n\r\nWhere code is:\r\n\r\n```\r\nSymbol ('x' ).y\r\n```\r\n\r\nFull trace:\r\n\r\n```\r\nFAILED [100%]\r\n class C:\r\n def __repr__(self):\r\n return 'x.y'\r\n \r\n> _ = sympy.Symbol('x') == C()\r\n\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\nsympy/core/expr.py:124: in __eq__\r\n other = sympify(other)\r\nsympy/core/sympify.py:385: in sympify\r\n expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)\r\nsympy/parsing/sympy_parser.py:1011: in parse_expr\r\n return eval_expr(code, local_dict, global_dict)\r\nsympy/parsing/sympy_parser.py:906: in eval_expr\r\n code, global_dict, local_dict) # take local objects in preference\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\n> ???\r\nE AttributeError: 'Symbol' object has no attribute 'y'\r\n\r\n:1: AttributeError\r\n```\r\n\r\nRelated issue: an unknown object whose repr is `x` will incorrectly compare as equal to a sympy symbol x:\r\n\r\n```\r\n class C:\r\n def __repr__(self):\r\n return 'x'\r\n\r\n assert sympy.Symbol('x') != C() # fails\r\n```\n", + "golden_patch": "diff --git a/sympy/core/expr.py b/sympy/core/expr.py\n--- a/sympy/core/expr.py\n+++ b/sympy/core/expr.py\n@@ -121,7 +121,7 @@ def _hashable_content(self):\n \n def __eq__(self, other):\n try:\n- other = sympify(other)\n+ other = _sympify(other)\n if not isinstance(other, Expr):\n return False\n except (SympifyError, SyntaxError):\n", + "test_patch": "diff --git a/sympy/core/tests/test_expr.py b/sympy/core/tests/test_expr.py\n--- a/sympy/core/tests/test_expr.py\n+++ b/sympy/core/tests/test_expr.py\n@@ -1903,3 +1903,24 @@ def test_ExprBuilder():\n eb = ExprBuilder(Mul)\n eb.args.extend([x, x])\n assert eb.build() == x**2\n+\n+def test_non_string_equality():\n+ # Expressions should not compare equal to strings\n+ x = symbols('x')\n+ one = sympify(1)\n+ assert (x == 'x') is False\n+ assert (x != 'x') is True\n+ assert (one == '1') is False\n+ assert (one != '1') is True\n+ assert (x + 1 == 'x + 1') is False\n+ assert (x + 1 != 'x + 1') is True\n+\n+ # Make sure == doesn't try to convert the resulting expression to a string\n+ # (e.g., by calling sympify() instead of _sympify())\n+\n+ class BadRepr(object):\n+ def __repr__(self):\n+ raise RuntimeError\n+\n+ assert (x == BadRepr()) is False\n+ assert (x != BadRepr()) is True\ndiff --git a/sympy/core/tests/test_var.py b/sympy/core/tests/test_var.py\n--- a/sympy/core/tests/test_var.py\n+++ b/sympy/core/tests/test_var.py\n@@ -19,7 +19,8 @@ def test_var():\n assert ns['fg'] == Symbol('fg')\n \n # check return value\n- assert v == ['d', 'e', 'fg']\n+ assert v != ['d', 'e', 'fg']\n+ assert v == [Symbol('d'), Symbol('e'), Symbol('fg')]\n \n \n def test_var_return():\n", + "fail_to_pass": "[\"test_var\"]", + "pass_to_pass": "[\"test_basic\", \"test_ibasic\", \"test_relational\", \"test_relational_assumptions\", \"test_basic_nostr\", \"test_series_expansion_for_uniform_order\", \"test_leadterm\", \"test_as_leading_term\", 
\"test_leadterm2\", \"test_leadterm3\", \"test_as_leading_term2\", \"test_as_leading_term3\", \"test_as_leading_term4\", \"test_as_leading_term_stub\", \"test_as_leading_term_deriv_integral\", \"test_atoms\", \"test_is_polynomial\", \"test_is_rational_function\", \"test_is_algebraic_expr\", \"test_SAGE1\", \"test_SAGE2\", \"test_SAGE3\", \"test_len\", \"test_doit\", \"test_attribute_error\", \"test_args\", \"test_noncommutative_expand_issue_3757\", \"test_as_numer_denom\", \"test_trunc\", \"test_as_independent\", \"test_replace\", \"test_find\", \"test_count\", \"test_has_basics\", \"test_has_multiple\", \"test_has_piecewise\", \"test_has_iterative\", \"test_has_integrals\", \"test_has_tuple\", \"test_has_units\", \"test_has_polys\", \"test_has_physics\", \"test_as_poly_as_expr\", \"test_nonzero\", \"test_is_number\", \"test_as_coeff_add\", \"test_as_coeff_mul\", \"test_as_coeff_exponent\", \"test_extractions\", \"test_nan_extractions\", \"test_coeff\", \"test_coeff2\", \"test_coeff2_0\", \"test_coeff_expand\", \"test_integrate\", \"test_as_base_exp\", \"test_issue_4963\", \"test_action_verbs\", \"test_as_powers_dict\", \"test_as_coefficients_dict\", \"test_args_cnc\", \"test_new_rawargs\", \"test_issue_5226\", \"test_free_symbols\", \"test_issue_5300\", \"test_floordiv\", \"test_as_coeff_Mul\", \"test_as_coeff_Add\", \"test_expr_sorting\", \"test_as_ordered_factors\", \"test_as_ordered_terms\", \"test_sort_key_atomic_expr\", \"test_eval_interval\", \"test_eval_interval_zoo\", \"test_primitive\", \"test_issue_5843\", \"test_is_constant\", \"test_equals\", \"test_random\", \"test_round\", \"test_held_expression_UnevaluatedExpr\", \"test_round_exception_nostr\", \"test_extract_branch_factor\", \"test_identity_removal\", \"test_float_0\", \"test_issue_6325\", \"test_issue_7426\", \"test_issue_11122\", \"test_issue_10651\", \"test_issue_10161\", \"test_issue_10755\", \"test_issue_11877\", \"test_normal\", \"test_expr\", \"test_ExprBuilder\", \"test_var_return\", \"test_var_accepts_comma\", \"test_var_keywords\"]", + "expected_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_expr.py": [], + "sympy/core/tests/test_var.py": [ + "test_var" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ], + "sympy/core/sympify.py": [ + "sympify" + ], + "sympy/parsing/sympy_parser.py": [ + "eval_expr" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/core/expr.py": [ + "imports", + "Expr", + "Expr.sort_key", + "Expr.__hash__", + "Expr.__eq__", + "Expr:7", + "Expr.__pos__", + 
"Expr.__neg__", + "Expr.__pow__", + "Expr.__rdiv__", + "Expr.__rfloordiv__", + "Expr.__int__", + "Expr.__float__", + "Expr._cmp", + "Expr.__ge__", + "Expr.__le__", + "Expr.__gt__", + "Expr.__lt__", + "Expr._from_mpmath", + "Expr._random", + "Expr.is_constant", + "Expr.equals", + "Expr._eval_is_extended_positive_negative", + "Expr._eval_interval", + "Expr._eval_power", + "Expr._eval_transpose", + "Expr._parse_order", + "Expr.as_ordered_terms", + "Expr.as_terms", + "Expr.getn", + "Expr.args_cnc", + "Expr.coeff", + "Expr.as_coefficient", + "Expr.as_independent", + "Expr.as_real_imag", + "Expr.as_base_exp", + "Expr.as_numer_denom", + "Expr.normal", + "Expr.extract_multiplicatively", + "Expr.extract_additively", + "Expr.could_extract_minus_sign", + "Expr.extract_branch_factor", + "Expr.is_polynomial", + "Expr.is_rational_function", + "Expr.is_algebraic_expr", + "Expr.series", + "Expr.aseries", + "Expr.taylor_term", + "Expr._eval_lseries", + "Expr.nseries", + "Expr._eval_nseries", + "Expr.compute_leading_term", + "Expr.leadterm", + "Expr.fps", + "Expr.fourier_series", + "Expr.diff", + "Expr._eval_expand_complex", + "Expr._expand_hint", + "Expr.expand", + "Expr.collect", + "Expr.round", + "Expr:15", + "Expr._eval_derivative_matrix_lines", + "AtomicExpr", + "_mag", + "UnevaluatedExpr.doit", + "_n2", + "ExprBuilder.__init__", + "ExprBuilder._build_args", + "ExprBuilder.__getitem__", + "ExprBuilder.search_element" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "imports", + "Expr", + "Expr.sort_key", + "Expr.__hash__", + "Expr.__eq__", + "Expr:7", + "Expr.__pos__", + "Expr.__neg__", + "Expr.__pow__", + "Expr.__rdiv__", + "Expr.__rfloordiv__", + "Expr.__int__", + "Expr.__float__", + "Expr._cmp", + "Expr.__ge__", + "Expr.__le__", + "Expr.__gt__", + "Expr.__lt__", + "Expr._from_mpmath", + "Expr._random", + "Expr.is_constant", + "Expr.equals", + "Expr._eval_is_extended_positive_negative", + "Expr._eval_interval", + "Expr._eval_power", + "Expr._eval_transpose", + "Expr._parse_order", + "Expr.as_ordered_terms", + "Expr.as_terms", + "Expr.getn", + "Expr.args_cnc", + "Expr.coeff", + "Expr.as_coefficient", + "Expr.as_independent", + "Expr.as_real_imag", + "Expr.as_base_exp", + "Expr.as_numer_denom", + "Expr.normal", + "Expr.extract_multiplicatively", + "Expr.extract_additively", + "Expr.could_extract_minus_sign", + "Expr.extract_branch_factor", + "Expr.is_polynomial", + "Expr.is_rational_function", + "Expr.is_algebraic_expr", + "Expr.series", + "Expr.aseries", + "Expr.taylor_term", + "Expr._eval_lseries", + "Expr.nseries", + "Expr._eval_nseries", + "Expr.compute_leading_term", + "Expr.leadterm", + "Expr.fps", + "Expr.fourier_series", + "Expr.diff", + "Expr._eval_expand_complex", + "Expr._expand_hint", + "Expr.expand", + "Expr.collect", + "Expr.round", + "Expr:15", + "Expr._eval_derivative_matrix_lines", + "AtomicExpr", + "_mag", + "UnevaluatedExpr.doit", + "_n2", + "ExprBuilder.__init__", + "ExprBuilder._build_args", + "ExprBuilder.__getitem__", + "ExprBuilder.search_element" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ], + "sympy/utilities/pytest.py": [ + "imports" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/core/expr.py": [ + 
"Expr.__eq__" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ], + "sympy/core/sympify.py": [ + "sympify" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ], + "sympy/core/sympify.py": [ + "sympify" + ] + }, + "alternative_spans": { + "sympy/core/expr.py": [ + "Expr.__eq__" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-18087", + "repo": "sympy/sympy", + "base_commit": "9da013ad0ddc3cd96fe505f2e47c63e372040916", + "problem_statement": "Simplify of simple trig expression fails\ntrigsimp in various versions, including 1.5, incorrectly simplifies cos(x)+sqrt(sin(x)**2) as though it were cos(x)+sin(x) for general complex x. (Oddly it gets this right if x is real.)\r\n\r\nEmbarrassingly I found this by accident while writing sympy-based teaching material...\r\n\n", + "golden_patch": "diff --git a/sympy/core/exprtools.py b/sympy/core/exprtools.py\n--- a/sympy/core/exprtools.py\n+++ b/sympy/core/exprtools.py\n@@ -358,8 +358,8 @@ def __init__(self, factors=None): # Factors\n for f in list(factors.keys()):\n if isinstance(f, Rational) and not isinstance(f, Integer):\n p, q = Integer(f.p), Integer(f.q)\n- factors[p] = (factors[p] if p in factors else 0) + factors[f]\n- factors[q] = (factors[q] if q in factors else 0) - factors[f]\n+ factors[p] = (factors[p] if p in factors else S.Zero) + factors[f]\n+ factors[q] = (factors[q] if q in factors else S.Zero) - factors[f]\n factors.pop(f)\n if i:\n factors[I] = S.One*i\n@@ -448,14 +448,12 @@ def as_expr(self): # Factors\n args = []\n for factor, exp in self.factors.items():\n if exp != 1:\n- b, e = factor.as_base_exp()\n- if isinstance(exp, int):\n- e = _keep_coeff(Integer(exp), e)\n- elif isinstance(exp, Rational):\n+ if isinstance(exp, Integer):\n+ b, e = factor.as_base_exp()\n e = _keep_coeff(exp, e)\n+ args.append(b**e)\n else:\n- e *= exp\n- args.append(b**e)\n+ args.append(factor**exp)\n else:\n args.append(factor)\n return Mul(*args)\n", + "test_patch": "diff --git a/sympy/core/tests/test_exprtools.py b/sympy/core/tests/test_exprtools.py\n--- a/sympy/core/tests/test_exprtools.py\n+++ b/sympy/core/tests/test_exprtools.py\n@@ -27,6 +27,8 @@ def test_Factors():\n assert Factors({x: 2, y: 3, sin(x): 4}).as_expr() == x**2*y**3*sin(x)**4\n assert Factors(S.Infinity) == Factors({oo: 1})\n assert Factors(S.NegativeInfinity) == Factors({oo: 1, -1: 1})\n+ # issue #18059:\n+ assert Factors((x**2)**S.Half).as_expr() == (x**2)**S.Half\n \n a = Factors({x: 5, y: 3, z: 7})\n b = Factors({ y: 4, z: 3, t: 10})\ndiff --git a/sympy/simplify/tests/test_fu.py b/sympy/simplify/tests/test_fu.py\n--- a/sympy/simplify/tests/test_fu.py\n+++ 
b/sympy/simplify/tests/test_fu.py\n@@ -276,6 +276,9 @@ def test_fu():\n expr = Mul(*[cos(2**i) for i in range(10)])\n assert fu(expr) == sin(1024)/(1024*sin(1))\n \n+ # issue #18059:\n+ assert fu(cos(x) + sqrt(sin(x)**2)) == cos(x) + sqrt(sin(x)**2)\n+\n \n def test_objective():\n assert fu(sin(x)/cos(x), measure=lambda x: x.count_ops()) == \\\n", + "fail_to_pass": "[\"test_Factors\", \"test_fu\"]", + "pass_to_pass": "[\"test_decompose_power\", \"test_Term\", \"test_gcd_terms\", \"test_factor_terms\", \"test_xreplace\", \"test_factor_nc\", \"test_issue_6360\", \"test_issue_7903\", \"test_issue_8263\", \"test_monotonic_sign\", \"test_TR1\", \"test_TR2\", \"test_TR2i\", \"test_TR3\", \"test__TR56\", \"test_TR5\", \"test_TR6\", \"test_TR7\", \"test_TR8\", \"test_TR9\", \"test_TR10\", \"test_TR10i\", \"test_TR11\", \"test_TR12\", \"test_TR13\", \"test_L\", \"test_objective\", \"test_process_common_addends\", \"test_trig_split\", \"test_TRmorrie\", \"test_TRpower\", \"test_hyper_as_trig\", \"test_TR12i\", \"test_TR14\", \"test_TR15_16_17\"]", + "expected_spans": { + "sympy/core/exprtools.py": [ + "Factors.__init__", + "Factors.as_expr" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_exprtools.py": [ + "test_Factors" + ], + "sympy/simplify/tests/test_fu.py": [ + "test_fu" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-18189", + "repo": "sympy/sympy", + "base_commit": "1923822ddf8265199dbd9ef9ce09641d3fd042b9", + "problem_statement": "diophantine: incomplete results depending on syms order with permute=True\n```\r\nIn [10]: diophantine(n**4 + m**4 - 2**4 - 3**4, syms=(m,n), permute=True)\r\nOut[10]: {(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}\r\n\r\nIn [11]: diophantine(n**4 + m**4 - 2**4 - 3**4, syms=(n,m), permute=True)\r\nOut[11]: {(3, 2)}\r\n```\r\n\ndiophantine: incomplete results depending on syms order with permute=True\n```\r\nIn [10]: diophantine(n**4 + m**4 - 2**4 - 3**4, syms=(m,n), permute=True)\r\nOut[10]: {(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}\r\n\r\nIn [11]: diophantine(n**4 + m**4 - 2**4 - 3**4, syms=(n,m), permute=True)\r\nOut[11]: {(3, 2)}\r\n```\r\n\n", + "golden_patch": "diff --git a/sympy/solvers/diophantine.py b/sympy/solvers/diophantine.py\n--- a/sympy/solvers/diophantine.py\n+++ b/sympy/solvers/diophantine.py\n@@ -182,7 +182,7 @@ def diophantine(eq, param=symbols(\"t\", integer=True), syms=None,\n if syms != var:\n dict_sym_index = dict(zip(syms, range(len(syms))))\n return {tuple([t[dict_sym_index[i]] for i in var])\n- for t in diophantine(eq, param)}\n+ for t in diophantine(eq, param, permute=permute)}\n n, d = eq.as_numer_denom()\n if n.is_number:\n return set()\n", + "test_patch": "diff --git a/sympy/solvers/tests/test_diophantine.py b/sympy/solvers/tests/test_diophantine.py\n--- a/sympy/solvers/tests/test_diophantine.py\n+++ b/sympy/solvers/tests/test_diophantine.py\n@@ -547,6 +547,13 @@ def test_diophantine():\n assert diophantine(x**2 + y**2 +3*x- 5, permute=True) == \\\n set([(-1, 1), (-4, -1), (1, -1), (1, 1), (-4, 1), (-1, -1), (4, 1), (4, -1)])\n \n+\n+ #test issue 18186\n+ assert diophantine(y**4 + x**4 - 2**4 - 3**4, syms=(x, y), permute=True) == \\\n+ set([(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)])\n+ assert diophantine(y**4 + x**4 - 2**4 - 3**4, syms=(y, x), permute=True) == \\\n+ set([(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)])\n+\n # issue 18122\n assert check_solutions(x**2-y)\n 
assert check_solutions(y**2-x)\n@@ -554,6 +561,7 @@ def test_diophantine():\n assert diophantine((y**2-x), t) == set([(t**2, -t)])\n \n \n+\n def test_general_pythagorean():\n from sympy.abc import a, b, c, d, e\n \n", + "fail_to_pass": "[\"test_diophantine\"]", + "pass_to_pass": "[\"test_input_format\", \"test_univariate\", \"test_classify_diop\", \"test_linear\", \"test_quadratic_simple_hyperbolic_case\", \"test_quadratic_elliptical_case\", \"test_quadratic_parabolic_case\", \"test_quadratic_perfect_square\", \"test_quadratic_non_perfect_square\", \"test_issue_9106\", \"test_issue_18138\", \"test_DN\", \"test_bf_pell\", \"test_length\", \"test_transformation_to_pell\", \"test_find_DN\", \"test_ldescent\", \"test_diop_ternary_quadratic_normal\", \"test_transformation_to_normal\", \"test_diop_ternary_quadratic\", \"test_square_factor\", \"test_parametrize_ternary_quadratic\", \"test_no_square_ternary_quadratic\", \"test_descent\", \"test_general_pythagorean\", \"test_diop_general_sum_of_squares_quick\", \"test_diop_partition\", \"test_prime_as_sum_of_two_squares\", \"test_sum_of_three_squares\", \"test_sum_of_four_squares\", \"test_power_representation\", \"test_assumptions\", \"test_diopcoverage\", \"test_holzer\", \"test_issue_9539\", \"test_issue_8943\", \"test_diop_sum_of_even_powers\", \"test_sum_of_squares_powers\", \"test__can_do_sum_of_squares\", \"test_diophantine_permute_sign\", \"test_issue_9538\"]", + "expected_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + }, + "test_file_spans": { + "sympy/solvers/tests/test_diophantine.py": [ + "test_diophantine", + "test_general_pythagorean" + ] + }, + "resolved_by": [ + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ], + "sympy/utilities/iterables.py": [ + "permute_signs", + "signed_permutations" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "imports", + "_remove_gcd", + "_rational_pq", + "_nint_or_floor", + "diophantine", + "merge_solution", + "diop_solve", + "classify_diop", + "impl:5", + "diop_linear", + "_diop_linear", + "base_solution_linear", + "diop_quadratic", + "_diop_quadratic", + "is_solution_quad", + "diop_DN", + "_special_diop_DN", + "cornacchia", + "PQa", + "diop_bf_DN", + "equivalent", + "length", + "transformation_to_DN", + "_transformation_to_DN", + "find_DN", + "_find_DN", + "check_param", + "diop_ternary_quadratic", + "_diop_ternary_quadratic", + "transformation_to_normal", + "_transformation_to_normal", + "parametrize_ternary_quadratic", + "_parametrize_ternary_quadratic", + "diop_ternary_quadratic_normal", + "_diop_ternary_quadratic_normal", + "sqf_normal", + "square_factor", + "ldescent", + "descent", + "gaussian_reduce", + "dot", + "norm", + "holzer", + "diop_general_pythagorean", + "_diop_general_pythagorean", + "diop_general_sum_of_squares", + "_diop_general_sum_of_squares", + "diop_general_sum_of_even_powers", + "_diop_general_sum_of_even_powers", + "partition", + "prime_as_sum_of_two_squares", + "sum_of_three_squares", + "sum_of_four_squares", + "power_representation", + "pow_rep_recursive", + "_can_do_sum_of_squares" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "imports", + "_remove_gcd", + "_rational_pq", + "_nint_or_floor", + "diophantine", + 
"merge_solution", + "diop_solve", + "classify_diop", + "impl:5", + "diop_linear", + "_diop_linear", + "base_solution_linear", + "diop_quadratic", + "_diop_quadratic", + "is_solution_quad", + "diop_DN", + "_special_diop_DN", + "cornacchia", + "PQa", + "diop_bf_DN", + "equivalent", + "length", + "transformation_to_DN", + "_transformation_to_DN", + "find_DN", + "_find_DN", + "check_param", + "diop_ternary_quadratic", + "_diop_ternary_quadratic", + "transformation_to_normal", + "_transformation_to_normal", + "parametrize_ternary_quadratic", + "_parametrize_ternary_quadratic", + "diop_ternary_quadratic_normal", + "_diop_ternary_quadratic_normal", + "sqf_normal", + "square_factor", + "ldescent", + "descent", + "gaussian_reduce", + "dot", + "norm", + "holzer", + "diop_general_pythagorean", + "_diop_general_pythagorean", + "diop_general_sum_of_squares", + "_diop_general_sum_of_squares", + "diop_general_sum_of_even_powers", + "_diop_general_sum_of_even_powers", + "partition", + "prime_as_sum_of_two_squares", + "sum_of_three_squares", + "sum_of_four_squares", + "power_representation", + "pow_rep_recursive", + "_can_do_sum_of_squares" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ], + "sympy/solvers/tests/test_diophantine.py": [ + "test_diophantine_permute_sign" + ] + }, + "alternative_spans": { + "sympy/solvers/diophantine.py": [ + "diophantine" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-18199", + "repo": "sympy/sympy", + "base_commit": "ba80d1e493f21431b4bf729b3e0452cd47eb9566", + "problem_statement": "nthroot_mod function misses one root of x = 0 mod p.\nWhen in the equation x**n = a mod p , when a % p == 0. Then x = 0 mod p is also a root of this equation. 
But right now `nthroot_mod` does not check for this condition. `nthroot_mod(17*17, 5 , 17)` has a root `0 mod 17`. But it does not return it.\n", + "golden_patch": "diff --git a/sympy/ntheory/residue_ntheory.py b/sympy/ntheory/residue_ntheory.py\n--- a/sympy/ntheory/residue_ntheory.py\n+++ b/sympy/ntheory/residue_ntheory.py\n@@ -2,6 +2,7 @@\n \n from sympy.core.compatibility import as_int, range\n from sympy.core.function import Function\n+from sympy.utilities.iterables import cartes\n from sympy.core.numbers import igcd, igcdex, mod_inverse\n from sympy.core.power import isqrt\n from sympy.core.singleton import S\n@@ -742,6 +743,48 @@ def _nthroot_mod1(s, q, p, all_roots):\n return res\n return min(res)\n \n+def _nthroot_mod_composite(a, n, m):\n+ \"\"\"\n+ Find the solutions to ``x**n = a mod m`` when m is not prime.\n+ \"\"\"\n+ from sympy.ntheory.modular import crt\n+ f = factorint(m)\n+ dd = {}\n+ for p, e in f.items():\n+ tot_roots = set()\n+ if e == 1:\n+ tot_roots.update(nthroot_mod(a, n, p, True) or [])\n+ else:\n+ for root in nthroot_mod(a, n, p, True) or []:\n+ rootn = pow(root, n)\n+ diff = (rootn // (root or 1) * n) % p\n+ if diff != 0:\n+ ppow = p\n+ for j in range(1, e):\n+ ppow *= p\n+ root = (root - (rootn - a) * mod_inverse(diff, p)) % ppow\n+ tot_roots.add(root)\n+ else:\n+ new_base = p\n+ roots_in_base = {root}\n+ while new_base < pow(p, e):\n+ new_base *= p\n+ new_roots = set()\n+ for k in roots_in_base:\n+ if (pow(k, n) - a) % (new_base) != 0:\n+ continue\n+ while k not in new_roots:\n+ new_roots.add(k)\n+ k = (k + (new_base // p)) % new_base\n+ roots_in_base = new_roots\n+ tot_roots = tot_roots | roots_in_base\n+ dd[pow(p, e)] = tot_roots\n+ a = []\n+ m = []\n+ for x, y in dd.items():\n+ m.append(x)\n+ a.append(list(y))\n+ return sorted(set(crt(m, list(i))[0] for i in cartes(*a)))\n \n def nthroot_mod(a, n, p, all_roots=False):\n \"\"\"\n@@ -771,11 +814,12 @@ def nthroot_mod(a, n, p, all_roots=False):\n if n == 2:\n return sqrt_mod(a, p, all_roots)\n # see Hackman \"Elementary Number Theory\" (2009), page 76\n+ if not isprime(p):\n+ return _nthroot_mod_composite(a, n, p)\n+ if a % p == 0:\n+ return [0]\n if not is_nthpow_residue(a, n, p):\n return None\n- if not isprime(p):\n- raise NotImplementedError(\"Not implemented for composite p\")\n-\n if (p - 1) % n == 0:\n return _nthroot_mod1(a, n, p, all_roots)\n # The roots of ``x**n - a = 0 (mod p)`` are roots of\n", + "test_patch": "diff --git a/sympy/ntheory/tests/test_residue.py b/sympy/ntheory/tests/test_residue.py\n--- a/sympy/ntheory/tests/test_residue.py\n+++ b/sympy/ntheory/tests/test_residue.py\n@@ -162,7 +162,8 @@ def test_residue():\n assert is_nthpow_residue(31, 4, 41)\n assert not is_nthpow_residue(2, 2, 5)\n assert is_nthpow_residue(8547, 12, 10007)\n- raises(NotImplementedError, lambda: nthroot_mod(29, 31, 74))\n+\n+ assert nthroot_mod(29, 31, 74) == [45]\n assert nthroot_mod(1801, 11, 2663) == 44\n for a, q, p in [(51922, 2, 203017), (43, 3, 109), (1801, 11, 2663),\n (26118163, 1303, 33333347), (1499, 7, 2663), (595, 6, 2663),\n@@ -170,8 +171,12 @@ def test_residue():\n r = nthroot_mod(a, q, p)\n assert pow(r, q, p) == a\n assert nthroot_mod(11, 3, 109) is None\n- raises(NotImplementedError, lambda: nthroot_mod(16, 5, 36))\n- raises(NotImplementedError, lambda: nthroot_mod(9, 16, 36))\n+ assert nthroot_mod(16, 5, 36, True) == [4, 22]\n+ assert nthroot_mod(9, 16, 36, True) == [3, 9, 15, 21, 27, 33]\n+ assert nthroot_mod(4, 3, 3249000) == []\n+ assert nthroot_mod(36010, 8, 87382, True) == [40208, 47174]\n+ 
assert nthroot_mod(0, 12, 37, True) == [0]\n+ assert nthroot_mod(0, 7, 100, True) == [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]\n \n for p in primerange(5, 100):\n qv = range(3, p, 4)\ndiff --git a/sympy/solvers/tests/test_solveset.py b/sympy/solvers/tests/test_solveset.py\n--- a/sympy/solvers/tests/test_solveset.py\n+++ b/sympy/solvers/tests/test_solveset.py\n@@ -2242,11 +2242,12 @@ def test_solve_modular():\n assert solveset(Mod(3**(3**x), 4) - 3, x, S.Integers) == \\\n Intersection(ImageSet(Lambda(n, Intersection({log(2*n + 1)/log(3)},\n S.Integers)), S.Naturals0), S.Integers)\n- # Not Implemented for m without primitive root\n+ # Implemented for m without primitive root\n assert solveset(Mod(x**3, 8) - 1, x, S.Integers) == \\\n- ConditionSet(x, Eq(Mod(x**3, 8) - 1, 0), S.Integers)\n+ ImageSet(Lambda(n, 8*n + 1), S.Integers)\n assert solveset(Mod(x**4, 9) - 4, x, S.Integers) == \\\n- ConditionSet(x, Eq(Mod(x**4, 9) - 4, 0), S.Integers)\n+ Union(ImageSet(Lambda(n, 9*n + 4), S.Integers),\n+ ImageSet(Lambda(n, 9*n + 5), S.Integers))\n # domain intersection\n assert solveset(3 - Mod(5*x - 8, 7), x, S.Naturals0) == \\\n Intersection(ImageSet(Lambda(n, 7*n + 5), S.Integers), S.Naturals0)\n", + "fail_to_pass": "[\"test_solve_modular\"]", + "pass_to_pass": "[\"test_invert_real\", \"test_invert_complex\", \"test_domain_check\", \"test_issue_11536\", \"test_issue_17479\", \"test_is_function_class_equation\", \"test_garbage_input\", \"test_solve_mul\", \"test_solve_invert\", \"test_errorinverses\", \"test_solve_polynomial\", \"test_return_root_of\", \"test__has_rational_power\", \"test_solveset_sqrt_1\", \"test_solveset_sqrt_2\", \"test_solve_polynomial_symbolic_param\", \"test_solve_rational\", \"test_solveset_real_gen_is_pow\", \"test_no_sol\", \"test_sol_zero_real\", \"test_no_sol_rational_extragenous\", \"test_solve_polynomial_cv_1a\", \"test_solveset_real_rational\", \"test_solveset_real_log\", \"test_poly_gens\", \"test_solve_abs\", \"test_issue_9565\", \"test_issue_10069\", \"test_real_imag_splitting\", \"test_units\", \"test_solve_only_exp_1\", \"test_atan2\", \"test_piecewise_solveset\", \"test_solveset_complex_polynomial\", \"test_sol_zero_complex\", \"test_solveset_complex_rational\", \"test_solveset_complex_exp\", \"test_solveset_real_exp\", \"test_solve_complex_log\", \"test_solve_complex_sqrt\", \"test_solveset_complex_tan\", \"test_solve_invalid_sol\", \"test_solveset\", \"test__solveset_multi\", \"test_conditionset\", \"test_solveset_domain\", \"test_improve_coverage\", \"test_issue_9522\", \"test_solvify\", \"test_abs_invert_solvify\", \"test_linear_eq_to_matrix\", \"test_issue_16577\", \"test_linsolve\", \"test_linsolve_immutable\", \"test_solve_decomposition\", \"test_nonlinsolve_basic\", \"test_nonlinsolve_abs\", \"test_raise_exception_nonlinsolve\", \"test_trig_system\", \"test_nonlinsolve_positive_dimensional\", \"test_nonlinsolve_polysys\", \"test_nonlinsolve_using_substitution\", \"test_nonlinsolve_complex\", \"test_issue_5132_1\", \"test_issue_5132_2\", \"test_issue_6752\", \"test_issue_2777\", \"test_issue_8828\", \"test_nonlinsolve_conditionset\", \"test_substitution_basic\", \"test_issue_5132_substitution\", \"test_raises_substitution\", \"test_issue_9556\", \"test_issue_9611\", \"test_issue_9557\", \"test_issue_9778\", \"test_issue_10214\", \"test_issue_9849\", \"test_issue_9953\", \"test_issue_9913\", \"test_issue_10397\", \"test_issue_14987\", \"test_simplification\", \"test_issue_10555\", \"test_issue_8715\", \"test_issue_11174\", \"test_issue_11534\", 
\"test_issue_10477\", \"test_issue_10671\", \"test_issue_11064\", \"test_issue_12478\", \"test_issue_12429\", \"test_solveset_arg\", \"test__is_finite_with_finite_vars\", \"test_issue_13550\", \"test_issue_13849\", \"test_issue_14223\", \"test_issue_10158\", \"test_issue_14300\", \"test_issue_14454\", \"test_term_factors\", \"test_transolve\", \"test_exponential_real\", \"test_expo_conditionset\", \"test_exponential_symbols\", \"test_is_exponential\", \"test_solve_exponential\", \"test_logarithmic\", \"test_is_logarithmic\", \"test_solve_logarithm\", \"test_linear_coeffs\", \"test_is_modular\", \"test_invert_modular\"]", + "expected_spans": { + "sympy/ntheory/residue_ntheory.py": [ + "imports", + "_nthroot_mod1", + "nthroot_mod" + ] + }, + "test_file_spans": { + "sympy/ntheory/tests/test_residue.py": [ + "test_residue" + ], + "sympy/solvers/tests/test_solveset.py": [ + "test_solve_modular" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12284", + "repo": "django/django", + "base_commit": "c5e373d48cbdd923575956fed477b63d66d9603f", + "problem_statement": "Model.get_FOO_display() does not work correctly with inherited choices.\nDescription\n\t \n\t\t(last modified by Mariusz Felisiak)\n\t \nGiven a base model with choices A containing 3 tuples\nChild Model inherits the base model overrides the choices A and adds 2 more tuples\nget_foo_display does not work correctly for the new tuples added\nExample:\nclass A(models.Model):\n foo_choice = [(\"A\",\"output1\"),(\"B\",\"output2\")]\n field_foo = models.CharField(max_length=254,choices=foo_choice)\n class Meta:\n\t abstract = True\nclass B(A):\n foo_choice = [(\"A\",\"output1\"),(\"B\",\"output2\"),(\"C\",\"output3\")]\n field_foo = models.CharField(max_length=254,choices=foo_choice)\nUpon invoking get_field_foo_display() on instance of B , \nFor value \"A\" and \"B\" the output works correctly i.e. returns \"output1\" / \"output2\"\nbut for value \"C\" the method returns \"C\" and not \"output3\" which is the expected behaviour\n", + "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -764,7 +764,11 @@ def contribute_to_class(self, cls, name, private_only=False):\n if not getattr(cls, self.attname, None):\n setattr(cls, self.attname, self.descriptor_class(self))\n if self.choices is not None:\n- if not hasattr(cls, 'get_%s_display' % self.name):\n+ # Don't override a get_FOO_display() method defined explicitly on\n+ # this class, but don't check methods derived from inheritance, to\n+ # allow overriding inherited choices. 
For more complex inheritance\n+ # structures users should override contribute_to_class().\n+ if 'get_%s_display' % self.name not in cls.__dict__:\n setattr(\n cls,\n 'get_%s_display' % self.name,\n", + "test_patch": "diff --git a/tests/model_fields/tests.py b/tests/model_fields/tests.py\n--- a/tests/model_fields/tests.py\n+++ b/tests/model_fields/tests.py\n@@ -178,6 +178,19 @@ def get_foo_bar_display(self):\n f = FooBar(foo_bar=1)\n self.assertEqual(f.get_foo_bar_display(), 'something')\n \n+ def test_overriding_inherited_FIELD_display(self):\n+ class Base(models.Model):\n+ foo = models.CharField(max_length=254, choices=[('A', 'Base A')])\n+\n+ class Meta:\n+ abstract = True\n+\n+ class Child(Base):\n+ foo = models.CharField(max_length=254, choices=[('A', 'Child A'), ('B', 'Child B')])\n+\n+ self.assertEqual(Child(foo='A').get_foo_display(), 'Child A')\n+ self.assertEqual(Child(foo='B').get_foo_display(), 'Child B')\n+\n def test_iterator_choices(self):\n \"\"\"\n get_choices() works with Iterators.\n", + "fail_to_pass": "[\"test_overriding_inherited_FIELD_display (model_fields.tests.GetFieldDisplayTests)\"]", + "pass_to_pass": "[\"test_blank_in_choices (model_fields.tests.GetChoicesTests)\", \"test_blank_in_grouped_choices (model_fields.tests.GetChoicesTests)\", \"test_empty_choices (model_fields.tests.GetChoicesTests)\", \"test_lazy_strings_not_evaluated (model_fields.tests.GetChoicesTests)\", \"test_choices_and_field_display (model_fields.tests.GetFieldDisplayTests)\", \"test_empty_iterator_choices (model_fields.tests.GetFieldDisplayTests)\", \"A translated display value is coerced to str.\", \"test_iterator_choices (model_fields.tests.GetFieldDisplayTests)\", \"test_overriding_FIELD_display (model_fields.tests.GetFieldDisplayTests)\", \"test_check (model_fields.tests.ChoicesTests)\", \"test_choices (model_fields.tests.ChoicesTests)\", \"test_flatchoices (model_fields.tests.ChoicesTests)\", \"test_formfield (model_fields.tests.ChoicesTests)\", \"test_invalid_choice (model_fields.tests.ChoicesTests)\", \"test_get_choices (model_fields.tests.GetChoicesLimitChoicesToTests)\", \"test_get_choices_reverse_related_field (model_fields.tests.GetChoicesLimitChoicesToTests)\", \"Can supply a custom choices form class to Field.formfield()\", \"deconstruct() uses __qualname__ for nested class support.\", \"Field instances can be pickled.\", \"test_field_name (model_fields.tests.BasicFieldTests)\", \"Fields are ordered based on their creation.\", \"test_field_repr (model_fields.tests.BasicFieldTests)\", \"__repr__() uses __qualname__ for nested class support.\", \"test_field_str (model_fields.tests.BasicFieldTests)\", \"test_field_verbose_name (model_fields.tests.BasicFieldTests)\", \"Field.formfield() sets disabled for fields with choices.\", \"test_show_hidden_initial (model_fields.tests.BasicFieldTests)\", \"test_get_choices (model_fields.tests.GetChoicesOrderingTests)\", \"test_get_choices_default_ordering (model_fields.tests.GetChoicesOrderingTests)\", \"test_get_choices_reverse_related_field (model_fields.tests.GetChoicesOrderingTests)\", \"test_get_choices_reverse_related_field_default_ordering (model_fields.tests.GetChoicesOrderingTests)\"]", + "expected_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "test_file_spans": { + "tests/model_fields/tests.py": [ + "GetFieldDisplayTests.test_iterator_choices" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/db/models/base.py": [ + 
"Model._get_FIELD_display" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.contribute_to_class" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/db/models/base.py": [ + "Model._get_FIELD_display" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/base.py": [ + "Model._get_FIELD_display" + ] + }, + "alternative_spans": {} + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/base.py": [ + "Model._get_FIELD_display" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/base.py": [ + "Model._get_FIELD_display" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/db/models/base.py": [ + "Model._get_FIELD_display" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12286", + "repo": "django/django", + "base_commit": "979f61abd322507aafced9627702362e541ec34e", + "problem_statement": "translation.E004 shouldn't be raised on sublanguages when a base language is available.\nDescription\n\t\nAccording to Django documentation:\nIf a base language is available but the sublanguage specified is not, Django uses the base language. For example, if a user specifies de-at (Austrian German) but Django only has de available, Django uses de.\nHowever, when using Django 3.0.2, if my settings.py has\nLANGUAGE_CODE = \"de-at\"\nI get this error message:\nSystemCheckError: System check identified some issues:\nERRORS:\n?: (translation.E004) You have provided a value for the LANGUAGE_CODE setting that is not in the LANGUAGES setting.\nIf using\nLANGUAGE_CODE = \"es-ar\"\nDjango works fine (es-ar is one of the translations provided out of the box).\n", + "golden_patch": "diff --git a/django/core/checks/translation.py b/django/core/checks/translation.py\n--- a/django/core/checks/translation.py\n+++ b/django/core/checks/translation.py\n@@ -1,4 +1,5 @@\n from django.conf import settings\n+from django.utils.translation import get_supported_language_variant\n from django.utils.translation.trans_real import language_code_re\n \n from . 
import Error, Tags, register\n@@ -55,7 +56,9 @@ def check_setting_languages_bidi(app_configs, **kwargs):\n @register(Tags.translation)\n def check_language_settings_consistent(app_configs, **kwargs):\n \"\"\"Error if language settings are not consistent with each other.\"\"\"\n- available_tags = {i for i, _ in settings.LANGUAGES} | {'en-us'}\n- if settings.LANGUAGE_CODE not in available_tags:\n+ try:\n+ get_supported_language_variant(settings.LANGUAGE_CODE)\n+ except LookupError:\n return [E004]\n- return []\n+ else:\n+ return []\n", + "test_patch": "diff --git a/tests/check_framework/test_translation.py b/tests/check_framework/test_translation.py\n--- a/tests/check_framework/test_translation.py\n+++ b/tests/check_framework/test_translation.py\n@@ -3,7 +3,7 @@\n check_language_settings_consistent, check_setting_language_code,\n check_setting_languages, check_setting_languages_bidi,\n )\n-from django.test import SimpleTestCase\n+from django.test import SimpleTestCase, override_settings\n \n \n class TranslationCheckTests(SimpleTestCase):\n@@ -75,12 +75,36 @@ def test_invalid_languages_bidi(self):\n Error(msg % tag, id='translation.E003'),\n ])\n \n+ @override_settings(USE_I18N=True, LANGUAGES=[('en', 'English')])\n def test_inconsistent_language_settings(self):\n msg = (\n 'You have provided a value for the LANGUAGE_CODE setting that is '\n 'not in the LANGUAGES setting.'\n )\n- with self.settings(LANGUAGE_CODE='fr', LANGUAGES=[('en', 'English')]):\n- self.assertEqual(check_language_settings_consistent(None), [\n- Error(msg, id='translation.E004'),\n- ])\n+ for tag in ['fr', 'fr-CA', 'fr-357']:\n+ with self.subTest(tag), self.settings(LANGUAGE_CODE=tag):\n+ self.assertEqual(check_language_settings_consistent(None), [\n+ Error(msg, id='translation.E004'),\n+ ])\n+\n+ @override_settings(\n+ USE_I18N=True,\n+ LANGUAGES=[\n+ ('de', 'German'),\n+ ('es', 'Spanish'),\n+ ('fr', 'French'),\n+ ('ca', 'Catalan'),\n+ ],\n+ )\n+ def test_valid_variant_consistent_language_settings(self):\n+ tests = [\n+ # language + region.\n+ 'fr-CA',\n+ 'es-419',\n+ 'de-at',\n+ # language + region + variant.\n+ 'ca-ES-valencia',\n+ ]\n+ for tag in tests:\n+ with self.subTest(tag), self.settings(LANGUAGE_CODE=tag):\n+ self.assertEqual(check_language_settings_consistent(None), [])\n", + "fail_to_pass": "[\"test_valid_variant_consistent_language_settings (check_framework.test_translation.TranslationCheckTests)\"]", + "pass_to_pass": "[\"test_inconsistent_language_settings (check_framework.test_translation.TranslationCheckTests)\", \"test_invalid_language_code (check_framework.test_translation.TranslationCheckTests)\", \"test_invalid_languages (check_framework.test_translation.TranslationCheckTests)\", \"test_invalid_languages_bidi (check_framework.test_translation.TranslationCheckTests)\", \"test_valid_language_code (check_framework.test_translation.TranslationCheckTests)\", \"test_valid_languages (check_framework.test_translation.TranslationCheckTests)\", \"test_valid_languages_bidi (check_framework.test_translation.TranslationCheckTests)\"]", + "expected_spans": { + "django/core/checks/translation.py": [ + "imports", + "check_language_settings_consistent" + ] + }, + "test_file_spans": { + "tests/check_framework/test_translation.py": [ + "imports", + "TranslationCheckTests.test_inconsistent_language_settings" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + 
"django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/core/checks/translation.py": [ + "imports", + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "imports", + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/core/checks/translation.py": [ + "imports", + "check_setting_languages", + "check_setting_languages_bidi", + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "imports", + "check_setting_languages", + "check_setting_languages_bidi", + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/core/checks/translation.py": [ + "imports", + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "imports", + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + 
"django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ], + "tests/i18n/sampleproject/sampleproject/settings.py": [] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/core/checks/translation.py": [ + 
"check_language_settings_consistent" + ], + "tests/check_framework/test_translation.py": [ + "imports", + "TranslationCheckTests.setUp", + "TranslationCheckTests.test_valid_language_code", + "TranslationCheckTests.test_invalid_language_code", + "TranslationCheckTests.test_valid_languages", + "TranslationCheckTests.test_invalid_languages", + "TranslationCheckTests.test_valid_languages_bidi", + "TranslationCheckTests.test_invalid_languages_bidi", + "TranslationCheckTests.test_inconsistent_language_settings" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + }, + "alternative_spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240828_autose_mixed", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240615_appmap-navie_gpt4o", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240530_autocoderover-v20240408", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240706_sima_gpt4o", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240623_moatless_claude35sonnet", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240908_infant_gpt4o", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240604_CodeR", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20241016_IBM-SWE-1.0", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240402_sweagent_claude3opus", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + 
] + } + }, + { + "run_name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240402_sweagent_gpt4", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + }, + { + "run_name": "20240721_amazon-q-developer-agent-20240719-dev", + "spans": { + "django/core/checks/translation.py": [ + "check_language_settings_consistent" + ] + } + } + ] + }, + { + "instance_id": "django__django-12308", + "repo": "django/django", + "base_commit": "2e0f04507b17362239ba49830d26fec504d46978", + "problem_statement": "JSONField are not properly displayed in admin when they are readonly.\nDescription\n\t\nJSONField values are displayed as dict when readonly in the admin.\nFor example, {\"foo\": \"bar\"} would be displayed as {'foo': 'bar'}, which is not valid JSON.\nI believe the fix would be to add a special case in django.contrib.admin.utils.display_for_field to call the prepare_value of the JSONField (not calling json.dumps directly to take care of the InvalidJSONInput case).\n", + "golden_patch": "diff --git a/django/contrib/admin/utils.py b/django/contrib/admin/utils.py\n--- a/django/contrib/admin/utils.py\n+++ b/django/contrib/admin/utils.py\n@@ -398,6 +398,11 @@ def display_for_field(value, field, empty_value_display):\n return formats.number_format(value)\n elif isinstance(field, models.FileField) and value:\n return format_html('{}', value.url, value)\n+ elif isinstance(field, models.JSONField) and value:\n+ try:\n+ return field.get_prep_value(value)\n+ except TypeError:\n+ return display_for_value(value, empty_value_display)\n else:\n return display_for_value(value, empty_value_display)\n \n", + "test_patch": "diff --git a/tests/admin_utils/tests.py b/tests/admin_utils/tests.py\n--- a/tests/admin_utils/tests.py\n+++ b/tests/admin_utils/tests.py\n@@ -176,6 +176,23 @@ def test_null_display_for_field(self):\n display_value = display_for_field(None, models.FloatField(), self.empty_value)\n self.assertEqual(display_value, self.empty_value)\n \n+ display_value = display_for_field(None, models.JSONField(), self.empty_value)\n+ self.assertEqual(display_value, self.empty_value)\n+\n+ def test_json_display_for_field(self):\n+ tests = [\n+ ({'a': {'b': 'c'}}, '{\"a\": {\"b\": \"c\"}}'),\n+ (['a', 'b'], '[\"a\", \"b\"]'),\n+ ('a', '\"a\"'),\n+ ({('a', 'b'): 'c'}, \"{('a', 'b'): 'c'}\"), # Invalid JSON.\n+ ]\n+ for value, display_value in tests:\n+ with self.subTest(value=value):\n+ self.assertEqual(\n+ display_for_field(value, models.JSONField(), self.empty_value),\n+ display_value,\n+ )\n+\n def test_number_formats_display_for_field(self):\n display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)\n self.assertEqual(display_value, '12345.6789')\n", + "fail_to_pass": "[\"test_json_display_for_field 
(admin_utils.tests.UtilsTests)\", \"test_label_for_field (admin_utils.tests.UtilsTests)\"]", + "pass_to_pass": "[\"test_cyclic (admin_utils.tests.NestedObjectsTests)\", \"test_non_added_parent (admin_utils.tests.NestedObjectsTests)\", \"test_on_delete_do_nothing (admin_utils.tests.NestedObjectsTests)\", \"test_queries (admin_utils.tests.NestedObjectsTests)\", \"test_relation_on_abstract (admin_utils.tests.NestedObjectsTests)\", \"test_siblings (admin_utils.tests.NestedObjectsTests)\", \"test_unrelated_roots (admin_utils.tests.NestedObjectsTests)\", \"test_flatten (admin_utils.tests.UtilsTests)\", \"test_flatten_fieldsets (admin_utils.tests.UtilsTests)\", \"test_label_for_field_form_argument (admin_utils.tests.UtilsTests)\", \"test_label_for_property (admin_utils.tests.UtilsTests)\", \"test_list_display_for_value (admin_utils.tests.UtilsTests)\", \"test_list_display_for_value_boolean (admin_utils.tests.UtilsTests)\", \"test_null_display_for_field (admin_utils.tests.UtilsTests)\", \"test_number_formats_display_for_field (admin_utils.tests.UtilsTests)\", \"test_number_formats_with_thousand_separator_display_for_field (admin_utils.tests.UtilsTests)\", \"test_quote (admin_utils.tests.UtilsTests)\", \"test_related_name (admin_utils.tests.UtilsTests)\", \"test_safestring_in_field_label (admin_utils.tests.UtilsTests)\", \"test_values_from_lookup_field (admin_utils.tests.UtilsTests)\"]", + "expected_spans": { + "django/contrib/admin/utils.py": [ + "display_for_field" + ] + }, + "test_file_spans": { + "tests/admin_utils/tests.py": [ + "UtilsTests.test_number_formats_display_for_field" + ] + }, + "resolved_by": [ + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/contrib/admin/utils.py": [ + "display_for_field" + ] + }, + "alternative_spans": { + "django/contrib/admin/utils.py": [ + "display_for_field" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/contrib/admin/utils.py": [ + "imports", + "display_for_field" + ] + }, + "alternative_spans": { + "django/contrib/admin/utils.py": [ + "imports", + "display_for_field" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/contrib/admin/utils.py": [ + "display_for_field" + ] + }, + "alternative_spans": { + "django/contrib/admin/utils.py": [ + "display_for_field" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-18532", + "repo": "sympy/sympy", + "base_commit": "74227f900b05009d4eed62e34a166228788a32ca", + "problem_statement": "expr.atoms() should return objects with no args instead of subclasses of Atom\n`expr.atoms()` with no arguments returns subclasses of `Atom` in `expr`. But the correct definition of a leaf node should be that it has no `.args`. \n\nThis should be easy to fix, but one needs to check that this doesn't affect the performance. 
\n\n", + "golden_patch": "diff --git a/sympy/core/basic.py b/sympy/core/basic.py\n--- a/sympy/core/basic.py\n+++ b/sympy/core/basic.py\n@@ -503,12 +503,11 @@ def atoms(self, *types):\n if types:\n types = tuple(\n [t if isinstance(t, type) else type(t) for t in types])\n+ nodes = preorder_traversal(self)\n+ if types:\n+ result = {node for node in nodes if isinstance(node, types)}\n else:\n- types = (Atom,)\n- result = set()\n- for expr in preorder_traversal(self):\n- if isinstance(expr, types):\n- result.add(expr)\n+ result = {node for node in nodes if not node.args}\n return result\n \n @property\n", + "test_patch": "diff --git a/sympy/codegen/tests/test_cnodes.py b/sympy/codegen/tests/test_cnodes.py\n--- a/sympy/codegen/tests/test_cnodes.py\n+++ b/sympy/codegen/tests/test_cnodes.py\n@@ -1,6 +1,6 @@\n from sympy.core.symbol import symbols\n from sympy.printing.ccode import ccode\n-from sympy.codegen.ast import Declaration, Variable, float64, int64\n+from sympy.codegen.ast import Declaration, Variable, float64, int64, String\n from sympy.codegen.cnodes import (\n alignof, CommaOperator, goto, Label, PreDecrement, PostDecrement, PreIncrement, PostIncrement,\n sizeof, union, struct\n@@ -66,7 +66,7 @@ def test_sizeof():\n assert ccode(sz) == 'sizeof(%s)' % typename\n assert sz.func(*sz.args) == sz\n assert not sz.is_Atom\n- assert all(atom == typename for atom in sz.atoms())\n+ assert sz.atoms() == {String('unsigned int'), String('sizeof')}\n \n \n def test_struct():\ndiff --git a/sympy/core/tests/test_basic.py b/sympy/core/tests/test_basic.py\n--- a/sympy/core/tests/test_basic.py\n+++ b/sympy/core/tests/test_basic.py\n@@ -137,7 +137,7 @@ def test_subs_with_unicode_symbols():\n \n \n def test_atoms():\n- assert b21.atoms() == set()\n+ assert b21.atoms() == set([Basic()])\n \n \n def test_free_symbols_empty():\n", + "fail_to_pass": "[\"test_sizeof\", \"test_atoms\"]", + "pass_to_pass": "[\"test_alignof\", \"test_CommaOperator\", \"test_goto_Label\", \"test_PreDecrement\", \"test_PostDecrement\", \"test_PreIncrement\", \"test_PostIncrement\", \"test_struct\", \"test__aresame\", \"test_structure\", \"test_equality\", \"test_matches_basic\", \"test_has\", \"test_subs\", \"test_subs_with_unicode_symbols\", \"test_free_symbols_empty\", \"test_doit\", \"test_S\", \"test_xreplace\", \"test_preorder_traversal\", \"test_sorted_args\", \"test_call\", \"test_rewrite\", \"test_literal_evalf_is_number_is_zero_is_comparable\", \"test_as_Basic\", \"test_atomic\", \"test_as_dummy\", \"test_canonical_variables\"]", + "expected_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "test_file_spans": { + "sympy/codegen/tests/test_cnodes.py": [ + "imports", + "test_sizeof" + ], + "sympy/core/tests/test_basic.py": [ + "test_atoms" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/core/basic.py": [ + 
"imports", + "as_Basic", + "Basic", + "Basic.__new__", + "Basic.__hash__", + "Basic.compare", + "Basic._compare_pretty", + "Basic.sort_key", + "Basic.__eq__", + "Basic.dummy_eq", + "Basic.__str__", + "Basic._repr_latex_", + "Basic:65", + "Basic.atoms", + "Basic.as_dummy", + "Basic.canonical_variables", + "Basic._recursive_call", + "Basic.is_comparable", + "Basic.subs", + "Basic._subs", + "Basic._has", + "Basic.replace", + "Basic.find", + "Basic.matches", + "Basic.doit", + "Basic._eval_rewrite", + "Basic._accept_eval_derivative", + "Basic._visit_eval_derivative_scalar", + "Basic._visit_eval_derivative_array", + "Basic._eval_derivative_n_times", + "Basic.rewrite", + "Basic:67", + "Basic._exec_constructor_postprocessors", + "Atom", + "Atom._sorted_args", + "_aresame", + "_atomic", + "preorder_traversal.__init__", + "preorder_traversal._preorder_traversal" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "imports", + "as_Basic", + "Basic", + "Basic.__new__", + "Basic.__hash__", + "Basic.compare", + "Basic._compare_pretty", + "Basic.sort_key", + "Basic.__eq__", + "Basic.dummy_eq", + "Basic.__str__", + "Basic._repr_latex_", + "Basic:65", + "Basic.atoms", + "Basic.as_dummy", + "Basic.canonical_variables", + "Basic._recursive_call", + "Basic.is_comparable", + "Basic.subs", + "Basic._subs", + "Basic._has", + "Basic.replace", + "Basic.find", + "Basic.matches", + "Basic.doit", + "Basic._eval_rewrite", + "Basic._accept_eval_derivative", + "Basic._visit_eval_derivative_scalar", + "Basic._visit_eval_derivative_array", + "Basic._eval_derivative_n_times", + "Basic.rewrite", + "Basic:67", + "Basic._exec_constructor_postprocessors", + "Atom", + "Atom._sorted_args", + "_aresame", + "_atomic", + "preorder_traversal.__init__", + "preorder_traversal._preorder_traversal" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/combinatorics/permutations.py": [ + "Permutation.atoms" + ], + "sympy/core/basic.py": [ + "Basic.atoms" + ], + "sympy/matrices/common.py": [ + "imports", + "MatrixProperties.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + }, + { + "name": 
"20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + }, + "alternative_spans": { + "sympy/core/basic.py": [ + "Basic.atoms" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-18621", + "repo": "sympy/sympy", + "base_commit": "b17ef6effe278d5b861d65896cc53442a6370d8f", + "problem_statement": "BlockDiagMatrix with one element cannot be converted to regular Matrix\nCreating a BlockDiagMatrix with one Matrix element will raise if trying to convert it back to a regular Matrix:\r\n\r\n```python\r\nM = sympy.Matrix([[1, 2], [3, 4]])\r\nD = sympy.BlockDiagMatrix(M)\r\nB = sympy.Matrix(D)\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n\r\n File \"\", line 3, in \r\n B = sympy.Matrix(D)\r\n\r\n File \"/home/rikard/.local/lib/python3.7/site-packages/sympy/matrices/dense.py\", line 430, in __new__\r\n return cls._new(*args, **kwargs)\r\n\r\n File \"/home/rikard/.local/lib/python3.7/site-packages/sympy/matrices/dense.py\", line 442, in _new\r\n rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)\r\n\r\n File \"/home/rikard/.local/lib/python3.7/site-packages/sympy/matrices/matrices.py\", line 2528, in _handle_creation_inputs\r\n return args[0].rows, args[0].cols, args[0].as_explicit()._mat\r\n\r\n File \"/home/rikard/.local/lib/python3.7/site-packages/sympy/matrices/expressions/matexpr.py\", line 340, in as_explicit\r\n for i in range(self.rows)])\r\n\r\n File \"/home/rikard/.local/lib/python3.7/site-packages/sympy/matrices/expressions/matexpr.py\", line 340, in \r\n for i in range(self.rows)])\r\n\r\n File \"/home/rikard/.local/lib/python3.7/site-packages/sympy/matrices/expressions/matexpr.py\", line 339, in \r\n for j in range(self.cols)]\r\n\r\n File \"/home/rikard/.local/lib/python3.7/site-packages/sympy/matrices/expressions/matexpr.py\", line 289, in __getitem__\r\n return self._entry(i, j)\r\n\r\n File \"/home/rikard/.local/lib/python3.7/site-packages/sympy/matrices/expressions/blockmatrix.py\", line 248, in _entry\r\n return self.blocks[row_block, col_block][i, j]\r\n\r\nTypeError: 'One' object is not subscriptable\r\n```\r\n\r\nInstead having two elements will work as expected:\r\n\r\n```python\r\nM = sympy.Matrix([[1, 2], [3, 4]])\r\nD = sympy.BlockDiagMatrix(M, M)\r\nB = sympy.Matrix(D)\r\n```\r\n\r\n```\r\nMatrix([\r\n[1, 2, 0, 0],\r\n[3, 4, 0, 0],\r\n[0, 0, 1, 2],\r\n[0, 0, 3, 4]])\r\n```\r\nThis issue exists for sympy 1.5.1 but not for sympy 1.4\n", + "golden_patch": "diff --git a/sympy/matrices/expressions/blockmatrix.py b/sympy/matrices/expressions/blockmatrix.py\n--- a/sympy/matrices/expressions/blockmatrix.py\n+++ b/sympy/matrices/expressions/blockmatrix.py\n@@ -301,7 +301,7 @@ def blocks(self):\n data = [[mats[i] if i == j else ZeroMatrix(mats[i].rows, mats[j].cols)\n for j in range(len(mats))]\n for i in range(len(mats))]\n- return ImmutableDenseMatrix(data)\n+ return ImmutableDenseMatrix(data, evaluate=False)\n \n @property\n def shape(self):\n", + "test_patch": "diff --git a/sympy/matrices/expressions/tests/test_blockmatrix.py b/sympy/matrices/expressions/tests/test_blockmatrix.py\n--- a/sympy/matrices/expressions/tests/test_blockmatrix.py\n+++ b/sympy/matrices/expressions/tests/test_blockmatrix.py\n@@ -110,6 +110,10 @@ def test_issue_17624():\n assert block_collapse(b * b) == BlockMatrix([[a**2, z], [z, z]])\n assert block_collapse(b * b * b) == BlockMatrix([[a**3, z], [z, z]])\n \n+def test_issue_18618():\n+ A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n+ assert A == 
Matrix(BlockDiagMatrix(A))\n+\n def test_BlockMatrix_trace():\n A, B, C, D = [MatrixSymbol(s, 3, 3) for s in 'ABCD']\n X = BlockMatrix([[A, B], [C, D]])\n", + "fail_to_pass": "[\"test_issue_18618\"]", + "pass_to_pass": "[\"test_bc_matmul\", \"test_bc_matadd\", \"test_bc_transpose\", \"test_bc_dist_diag\", \"test_block_plus_ident\", \"test_BlockMatrix\", \"test_block_collapse_explicit_matrices\", \"test_issue_17624\", \"test_BlockMatrix_trace\", \"test_BlockMatrix_Determinant\", \"test_squareBlockMatrix\", \"test_BlockDiagMatrix\", \"test_blockcut\", \"test_reblock_2x2\", \"test_deblock\"]", + "expected_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.blocks" + ] + }, + "test_file_spans": { + "sympy/matrices/expressions/tests/test_blockmatrix.py": [ + "test_BlockMatrix_trace" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix.blocks" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix.blocks" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sympy/matrices/expressions/matexpr.py": [ + "MatrixExpr.as_explicit" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__", + "BlockDiagMatrix.blocks" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__", + "BlockDiagMatrix.blocks" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "imports", + "BlockDiagMatrix" + ], + "sympy/matrices/expressions/tests/test_blockmatrix.py": [] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "imports", + "BlockDiagMatrix" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/matrices/expressions/matexpr.py": [ + "MatrixExpr.as_explicit" + ] + }, + "alternative_spans": {} + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix.blocks", + "BlockDiagMatrix._eval_inverse" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix.blocks", + "BlockDiagMatrix._eval_inverse" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/matrices/expressions/matexpr.py": [ + "MatrixExpr.as_explicit" + ] + }, + "alternative_spans": {} + }, + { + "name": 
"20240617_factory_code_droid", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.diag" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.diag" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix.blocks" + ], + "sympy/matrices/matrices.py": [ + "MatrixBase._handle_creation_inputs" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix.blocks" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix", + "BlockDiagMatrix.__new__", + "BlockDiagMatrix.diag" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix", + "BlockDiagMatrix.__new__", + "BlockDiagMatrix.diag" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix._eval_transpose" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix._eval_transpose" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix.__new__", + "BlockDiagMatrix.diag", + "BlockDiagMatrix._eval_inverse" + ], + "sympy/matrices/expressions/tests/test_blockmatrix.py": [] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix.__new__", + "BlockDiagMatrix.diag", + "BlockDiagMatrix._eval_inverse" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__", + "BlockDiagMatrix._eval_inverse" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__", + "BlockDiagMatrix._eval_inverse" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix._eval_inverse" + ] + }, + "alternative_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix._eval_inverse" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240615_appmap-navie_gpt4o", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__" + ] + } + }, + { + "run_name": "20240623_moatless_claude35sonnet", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__" + ] + } + }, + { + "run_name": "20240820_honeycomb", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "imports", + "BlockDiagMatrix" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.diag" + ] + } + }, + { + "run_name": 
"20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix" + ] + } + }, + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix", + "BlockDiagMatrix.__new__", + "BlockDiagMatrix.diag" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix._eval_transpose" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockMatrix._entry", + "BlockDiagMatrix.__new__", + "BlockDiagMatrix.diag", + "BlockDiagMatrix._eval_inverse" + ] + } + }, + { + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix.__new__", + "BlockDiagMatrix._eval_inverse" + ] + } + }, + { + "run_name": "20240721_amazon-q-developer-agent-20240719-dev", + "spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "BlockDiagMatrix._eval_inverse" + ] + } + } + ] + }, + { + "instance_id": "django__django-12453", + "repo": "django/django", + "base_commit": "b330b918e979ea39a21d47b61172d112caf432c3", + "problem_statement": "`TransactionTestCase.serialized_rollback` fails to restore objects due to ordering constraints\nDescription\n\t\nI hit this problem in a fairly complex projet and haven't had the time to write a minimal reproduction case. I think it can be understood just by inspecting the code so I'm going to describe it while I have it in mind.\nSetting serialized_rollback = True on a TransactionTestCase triggers \u200brollback emulation. In practice, for each database:\nBaseDatabaseCreation.create_test_db calls connection._test_serialized_contents = connection.creation.serialize_db_to_string()\nTransactionTestCase._fixture_setup calls connection.creation.deserialize_db_from_string(connection._test_serialized_contents)\n(The actual code isn't written that way; it's equivalent but the symmetry is less visible.)\nserialize_db_to_string orders models with serializers.sort_dependencies and serializes them. The sorting algorithm only deals with natural keys. It doesn't do anything to order models referenced by foreign keys before models containing said foreign keys. That wouldn't be possible in general because circular foreign keys are allowed.\ndeserialize_db_from_string deserializes and saves models without wrapping in a transaction. This can result in integrity errors if an instance containing a foreign key is saved before the instance it references. 
I'm suggesting to fix it as follows:\ndiff --git a/django/db/backends/base/creation.py b/django/db/backends/base/creation.py\nindex bca8376..7bed2be 100644\n--- a/django/db/backends/base/creation.py\n+++ b/django/db/backends/base/creation.py\n@@ -4,7 +4,7 @@ import time\n from django.apps import apps\n from django.conf import settings\n from django.core import serializers\n-from django.db import router\n+from django.db import router, transaction\n from django.utils.six import StringIO\n from django.utils.six.moves import input\n \n@@ -128,8 +128,9 @@ class BaseDatabaseCreation(object):\n\t\t the serialize_db_to_string method.\n\t\t \"\"\"\n\t\t data = StringIO(data)\n-\t\tfor obj in serializers.deserialize(\"json\", data, using=self.connection.alias):\n-\t\t\tobj.save()\n+\t\twith transaction.atomic(using=self.connection.alias):\n+\t\t\tfor obj in serializers.deserialize(\"json\", data, using=self.connection.alias):\n+\t\t\t\tobj.save()\n \n\t def _get_database_display_str(self, verbosity, database_name):\n\t\t \"\"\"\nNote that loaddata doesn't have this problem because it wraps everything in a transaction:\n\tdef handle(self, *fixture_labels, **options):\n\t\t# ...\n\t\twith transaction.atomic(using=self.using):\n\t\t\tself.loaddata(fixture_labels)\n\t\t# ...\nThis suggest that the transaction was just forgotten in the implementation of deserialize_db_from_string.\nIt should be possible to write a deterministic test for this bug because the order in which serialize_db_to_string serializes models depends on the app registry, and the app registry uses OrderedDict to store apps and models in a deterministic order.\n", + "golden_patch": "diff --git a/django/db/backends/base/creation.py b/django/db/backends/base/creation.py\n--- a/django/db/backends/base/creation.py\n+++ b/django/db/backends/base/creation.py\n@@ -6,6 +6,7 @@\n from django.conf import settings\n from django.core import serializers\n from django.db import router\n+from django.db.transaction import atomic\n \n # The prefix to put on the default database name when creating\n # the test database.\n@@ -126,8 +127,16 @@ def deserialize_db_from_string(self, data):\n the serialize_db_to_string() method.\n \"\"\"\n data = StringIO(data)\n- for obj in serializers.deserialize(\"json\", data, using=self.connection.alias):\n- obj.save()\n+ # Load data in a transaction to handle forward references and cycles.\n+ with atomic(using=self.connection.alias):\n+ # Disable constraint checks, because some databases (MySQL) doesn't\n+ # support deferred checks.\n+ with self.connection.constraint_checks_disabled():\n+ for obj in serializers.deserialize('json', data, using=self.connection.alias):\n+ obj.save()\n+ # Manually check for any invalid keys that might have been added,\n+ # because constraint checks were disabled.\n+ self.connection.check_constraints()\n \n def _get_database_display_str(self, verbosity, database_name):\n \"\"\"\n", + "test_patch": "diff --git a/tests/backends/base/test_creation.py b/tests/backends/base/test_creation.py\n--- a/tests/backends/base/test_creation.py\n+++ b/tests/backends/base/test_creation.py\n@@ -7,6 +7,8 @@\n )\n from django.test import SimpleTestCase\n \n+from ..models import Object, ObjectReference\n+\n \n def get_connection_copy():\n # Get a copy of the default connection. 
(Can't use django.db.connection\n@@ -73,3 +75,29 @@ def test_migrate_test_setting_true(self, mocked_migrate, mocked_ensure_connectio\n finally:\n with mock.patch.object(creation, '_destroy_test_db'):\n creation.destroy_test_db(old_database_name, verbosity=0)\n+\n+\n+class TestDeserializeDbFromString(SimpleTestCase):\n+ databases = {'default'}\n+\n+ def test_circular_reference(self):\n+ # deserialize_db_from_string() handles circular references.\n+ data = \"\"\"\n+ [\n+ {\n+ \"model\": \"backends.object\",\n+ \"pk\": 1,\n+ \"fields\": {\"obj_ref\": 1, \"related_objects\": []}\n+ },\n+ {\n+ \"model\": \"backends.objectreference\",\n+ \"pk\": 1,\n+ \"fields\": {\"obj\": 1}\n+ }\n+ ]\n+ \"\"\"\n+ connection.creation.deserialize_db_from_string(data)\n+ obj = Object.objects.get()\n+ obj_ref = ObjectReference.objects.get()\n+ self.assertEqual(obj.obj_ref, obj_ref)\n+ self.assertEqual(obj_ref.obj, obj)\ndiff --git a/tests/backends/models.py b/tests/backends/models.py\n--- a/tests/backends/models.py\n+++ b/tests/backends/models.py\n@@ -89,6 +89,7 @@ def __str__(self):\n \n class Object(models.Model):\n related_objects = models.ManyToManyField(\"self\", db_constraint=False, symmetrical=False)\n+ obj_ref = models.ForeignKey('ObjectReference', models.CASCADE, null=True)\n \n def __str__(self):\n return str(self.id)\n", + "fail_to_pass": "[\"test_circular_reference (backends.base.test_creation.TestDeserializeDbFromString)\"]", + "pass_to_pass": "[\"test_custom_test_name (backends.base.test_creation.TestDbSignatureTests)\", \"test_custom_test_name_with_test_prefix (backends.base.test_creation.TestDbSignatureTests)\", \"test_default_name (backends.base.test_creation.TestDbSignatureTests)\", \"test_migrate_test_setting_false (backends.base.test_creation.TestDbCreationTests)\", \"test_migrate_test_setting_true (backends.base.test_creation.TestDbCreationTests)\"]", + "expected_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "test_file_spans": { + "tests/backends/base/test_creation.py": [ + "imports" + ], + "tests/backends/models.py": [ + "Object" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + 
"django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.__init__", + "BaseDatabaseCreation.create_test_db", + "BaseDatabaseCreation.serialize_db_to_string", + "BaseDatabaseCreation.deserialize_db_from_string", + "BaseDatabaseCreation._get_database_display_str", + "BaseDatabaseCreation._create_test_db", + "BaseDatabaseCreation.clone_test_db", + "BaseDatabaseCreation.get_test_db_clone_settings", + "BaseDatabaseCreation._clone_test_db", + "BaseDatabaseCreation.destroy_test_db", + "BaseDatabaseCreation._destroy_test_db", + "BaseDatabaseCreation.test_db_signature" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.__init__", + "BaseDatabaseCreation.create_test_db", + "BaseDatabaseCreation.serialize_db_to_string", + "BaseDatabaseCreation.deserialize_db_from_string", + "BaseDatabaseCreation._get_database_display_str", + "BaseDatabaseCreation._create_test_db", + "BaseDatabaseCreation.clone_test_db", + "BaseDatabaseCreation.get_test_db_clone_settings", + "BaseDatabaseCreation._clone_test_db", + "BaseDatabaseCreation.destroy_test_db", + "BaseDatabaseCreation._destroy_test_db", + "BaseDatabaseCreation.test_db_signature" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + 
{ + "name": "20240829_Isoform", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ], + "tests/auth_tests/settings.py": [ + "imports" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": 
{ + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + }, + "alternative_spans": { + "django/db/backends/base/creation.py": [ + "imports", + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240530_autocoderover-v20240408", + "spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + }, + { + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.deserialize_db_from_string" + ] + } + } + ] + }, + { + "instance_id": "django__django-12470", + "repo": "django/django", + "base_commit": "142ab6846ac09d6d401e26fc8b6b988a583ac0f5", + "problem_statement": "Inherited model doesn't correctly order by \"-pk\" when specified on Parent.Meta.ordering\nDescription\n\t\nGiven the following model definition:\nfrom django.db import models\nclass Parent(models.Model):\n\tclass Meta:\n\t\tordering = [\"-pk\"]\nclass Child(Parent):\n\tpass\nQuerying the Child class results in the following:\n>>> print(Child.objects.all().query)\nSELECT \"myapp_parent\".\"id\", 
\"myapp_child\".\"parent_ptr_id\" FROM \"myapp_child\" INNER JOIN \"myapp_parent\" ON (\"myapp_child\".\"parent_ptr_id\" = \"myapp_parent\".\"id\") ORDER BY \"myapp_parent\".\"id\" ASC\nThe query is ordered ASC but I expect the order to be DESC.\n", + "golden_patch": "diff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py\n--- a/django/db/models/sql/compiler.py\n+++ b/django/db/models/sql/compiler.py\n@@ -709,9 +709,9 @@ def find_ordering_name(self, name, opts, alias=None, default_order='ASC',\n field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)\n \n # If we get to this point and the field is a relation to another model,\n- # append the default ordering for that model unless the attribute name\n- # of the field is specified.\n- if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:\n+ # append the default ordering for that model unless it is the pk\n+ # shortcut or the attribute name of the field that is specified.\n+ if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name and name != 'pk':\n # Firstly, avoid infinite loops.\n already_seen = already_seen or set()\n join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)\n", + "test_patch": "diff --git a/tests/model_inheritance/models.py b/tests/model_inheritance/models.py\n--- a/tests/model_inheritance/models.py\n+++ b/tests/model_inheritance/models.py\n@@ -181,6 +181,8 @@ class GrandParent(models.Model):\n place = models.ForeignKey(Place, models.CASCADE, null=True, related_name='+')\n \n class Meta:\n+ # Ordering used by test_inherited_ordering_pk_desc.\n+ ordering = ['-pk']\n unique_together = ('first_name', 'last_name')\n \n \ndiff --git a/tests/model_inheritance/tests.py b/tests/model_inheritance/tests.py\n--- a/tests/model_inheritance/tests.py\n+++ b/tests/model_inheritance/tests.py\n@@ -7,7 +7,7 @@\n \n from .models import (\n Base, Chef, CommonInfo, GrandChild, GrandParent, ItalianRestaurant,\n- MixinModel, ParkingLot, Place, Post, Restaurant, Student, SubBase,\n+ MixinModel, Parent, ParkingLot, Place, Post, Restaurant, Student, SubBase,\n Supplier, Title, Worker,\n )\n \n@@ -204,6 +204,19 @@ class A(models.Model):\n \n self.assertEqual(A.attr.called, (A, 'attr'))\n \n+ def test_inherited_ordering_pk_desc(self):\n+ p1 = Parent.objects.create(first_name='Joe', email='joe@email.com')\n+ p2 = Parent.objects.create(first_name='Jon', email='jon@email.com')\n+ expected_order_by_sql = 'ORDER BY %s.%s DESC' % (\n+ connection.ops.quote_name(Parent._meta.db_table),\n+ connection.ops.quote_name(\n+ Parent._meta.get_field('grandparent_ptr').column\n+ ),\n+ )\n+ qs = Parent.objects.all()\n+ self.assertSequenceEqual(qs, [p2, p1])\n+ self.assertIn(expected_order_by_sql, str(qs.query))\n+\n \n class ModelInheritanceDataTests(TestCase):\n @classmethod\n", + "fail_to_pass": "[\"test_inherited_ordering_pk_desc (model_inheritance.tests.ModelInheritanceTests)\"]", + "pass_to_pass": "[\"test_abstract_fk_related_name (model_inheritance.tests.InheritanceSameModelNameTests)\", \"test_unique (model_inheritance.tests.InheritanceUniqueTests)\", \"test_unique_together (model_inheritance.tests.InheritanceUniqueTests)\", \"test_abstract (model_inheritance.tests.ModelInheritanceTests)\", \"test_abstract_parent_link (model_inheritance.tests.ModelInheritanceTests)\", \"Creating a child with non-abstract parents only issues INSERTs.\", \"test_custompk_m2m (model_inheritance.tests.ModelInheritanceTests)\", 
\"test_eq (model_inheritance.tests.ModelInheritanceTests)\", \"test_init_subclass (model_inheritance.tests.ModelInheritanceTests)\", \"test_meta_fields_and_ordering (model_inheritance.tests.ModelInheritanceTests)\", \"test_mixin_init (model_inheritance.tests.ModelInheritanceTests)\", \"test_model_with_distinct_accessors (model_inheritance.tests.ModelInheritanceTests)\", \"test_model_with_distinct_related_query_name (model_inheritance.tests.ModelInheritanceTests)\", \"test_reverse_relation_for_different_hierarchy_tree (model_inheritance.tests.ModelInheritanceTests)\", \"test_set_name (model_inheritance.tests.ModelInheritanceTests)\", \"test_update_parent_filtering (model_inheritance.tests.ModelInheritanceTests)\", \"test_exclude_inherited_on_null (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_filter_inherited_model (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_filter_inherited_on_null (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_filter_on_parent_returns_object_of_parent_type (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_inherited_does_not_exist_exception (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_inherited_multiple_objects_returned_exception (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_parent_cache_reuse (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_parent_child_one_to_one_link (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_parent_child_one_to_one_link_on_nonrelated_objects (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_parent_fields_available_for_filtering_in_child_model (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_related_objects_for_inherited_models (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_select_related_defer (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_select_related_works_on_parent_model_fields (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_update_inherited_model (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_update_query_counts (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_update_works_on_parent_and_child_models_at_once (model_inheritance.tests.ModelInheritanceDataTests)\", \"test_values_works_on_parent_model_fields (model_inheritance.tests.ModelInheritanceDataTests)\"]", + "expected_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.find_ordering_name" + ] + }, + "test_file_spans": { + "tests/model_inheritance/models.py": [ + "GrandParent", + "GrandParent.Meta" + ], + "tests/model_inheritance/tests.py": [ + "imports", + "ModelInheritanceTests" + ] + }, + "resolved_by": [ + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "imports", + "SQLCompiler.get_order_by" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "imports", + "SQLCompiler.get_order_by" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "django/db/models/sql/compiler.py": [ + "imports", + "SQLCompiler.get_order_by" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-18698", + "repo": "sympy/sympy", + "base_commit": "3dff1b98a78f28c953ae2140b69356b8391e399c", + "problem_statement": "sqf and sqf_list output is not consistant\nThe example below is wrong in the sense that we should have (x*_2 - 5_x + 6, 3) and not 2 factors of multiplicity 3.\n\n```\n> sqf_list( (x**2 + 1) * (x - 1)**2 * (x - 
2)**3 * (x - 3)**3 )\n\n> (1, [(x**2 + 1, 1), (x - 1, 2), (x - 3, 3), (x - 2, 3)])\n```\n\nwhereas below is correct --- one factor of multiplicity 2\n\n```\n> sqf_list( x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2 )\n\n> (1, [(x - 2, 1), (x**2 - 1, 2)])\n```\n\n", + "golden_patch": "diff --git a/sympy/polys/polytools.py b/sympy/polys/polytools.py\n--- a/sympy/polys/polytools.py\n+++ b/sympy/polys/polytools.py\n@@ -2,7 +2,8 @@\n \n from __future__ import print_function, division\n \n-from functools import wraps\n+from functools import wraps, reduce\n+from operator import mul\n \n from sympy.core import (\n S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple\n@@ -5905,10 +5906,7 @@ def _symbolic_factor_list(expr, opt, method):\n if arg.is_Number:\n coeff *= arg\n continue\n- if arg.is_Mul:\n- args.extend(arg.args)\n- continue\n- if arg.is_Pow:\n+ elif arg.is_Pow:\n base, exp = arg.args\n if base.is_Number and exp.is_Number:\n coeff *= arg\n@@ -5949,6 +5947,9 @@ def _symbolic_factor_list(expr, opt, method):\n other.append((f, k))\n \n factors.append((_factors_product(other), exp))\n+ if method == 'sqf':\n+ factors = [(reduce(mul, (f for f, _ in factors if _ == k)), k)\n+ for k in set(i for _, i in factors)]\n \n return coeff, factors\n \n", + "test_patch": "diff --git a/sympy/polys/tests/test_polytools.py b/sympy/polys/tests/test_polytools.py\n--- a/sympy/polys/tests/test_polytools.py\n+++ b/sympy/polys/tests/test_polytools.py\n@@ -3273,7 +3273,7 @@ def test_to_rational_coeffs():\n def test_factor_terms():\n # issue 7067\n assert factor_list(x*(x + y)) == (1, [(x, 1), (x + y, 1)])\n- assert sqf_list(x*(x + y)) == (1, [(x, 1), (x + y, 1)])\n+ assert sqf_list(x*(x + y)) == (1, [(x**2 + x*y, 1)])\n \n \n def test_as_list():\n@@ -3333,3 +3333,8 @@ def test_issue_17988():\n def test_issue_18205():\n assert cancel((2 + I)*(3 - I)) == 7 + I\n assert cancel((2 + I)*(2 - I)) == 5\n+\n+def test_issue_8695():\n+ p = (x**2 + 1) * (x - 1)**2 * (x - 2)**3 * (x - 3)**3\n+ result = (1, [(x**2 + 1, 1), (x - 1, 2), (x**2 - 5*x + 6, 3)])\n+ assert sqf_list(p) == result\n", + "fail_to_pass": "[\"test_factor_terms\"]", + "pass_to_pass": "[\"test_Poly_mixed_operations\", \"test_Poly_from_dict\", \"test_Poly_from_list\", \"test_Poly_from_poly\", \"test_Poly_from_expr\", \"test_Poly__new__\", \"test_Poly__args\", \"test_Poly__gens\", \"test_Poly_zero\", \"test_Poly_one\", \"test_Poly__unify\", \"test_Poly_free_symbols\", \"test_PurePoly_free_symbols\", \"test_Poly__eq__\", \"test_PurePoly__eq__\", \"test_PurePoly_Poly\", \"test_Poly_get_domain\", \"test_Poly_set_domain\", \"test_Poly_get_modulus\", \"test_Poly_set_modulus\", \"test_Poly_add_ground\", \"test_Poly_sub_ground\", \"test_Poly_mul_ground\", \"test_Poly_quo_ground\", \"test_Poly_exquo_ground\", \"test_Poly_abs\", \"test_Poly_neg\", \"test_Poly_add\", \"test_Poly_sub\", \"test_Poly_mul\", \"test_issue_13079\", \"test_Poly_sqr\", \"test_Poly_pow\", \"test_Poly_divmod\", \"test_Poly_eq_ne\", \"test_Poly_nonzero\", \"test_Poly_properties\", \"test_Poly_is_irreducible\", \"test_Poly_subs\", \"test_Poly_replace\", \"test_Poly_reorder\", \"test_Poly_ltrim\", \"test_Poly_has_only_gens\", \"test_Poly_to_ring\", \"test_Poly_to_field\", \"test_Poly_to_exact\", \"test_Poly_retract\", \"test_Poly_slice\", \"test_Poly_coeffs\", \"test_Poly_monoms\", \"test_Poly_terms\", \"test_Poly_all_coeffs\", \"test_Poly_all_monoms\", \"test_Poly_all_terms\", \"test_Poly_termwise\", \"test_Poly_length\", \"test_Poly_as_dict\", \"test_Poly_as_expr\", \"test_Poly_lift\", 
\"test_Poly_deflate\", \"test_Poly_inject\", \"test_Poly_eject\", \"test_Poly_exclude\", \"test_Poly__gen_to_level\", \"test_Poly_degree\", \"test_Poly_degree_list\", \"test_Poly_total_degree\", \"test_Poly_homogenize\", \"test_Poly_homogeneous_order\", \"test_Poly_LC\", \"test_Poly_TC\", \"test_Poly_EC\", \"test_Poly_coeff\", \"test_Poly_nth\", \"test_Poly_LM\", \"test_Poly_LM_custom_order\", \"test_Poly_EM\", \"test_Poly_LT\", \"test_Poly_ET\", \"test_Poly_max_norm\", \"test_Poly_l1_norm\", \"test_Poly_clear_denoms\", \"test_Poly_rat_clear_denoms\", \"test_Poly_integrate\", \"test_Poly_diff\", \"test_issue_9585\", \"test_Poly_eval\", \"test_Poly___call__\", \"test_parallel_poly_from_expr\", \"test_pdiv\", \"test_div\", \"test_issue_7864\", \"test_gcdex\", \"test_revert\", \"test_subresultants\", \"test_resultant\", \"test_discriminant\", \"test_dispersion\", \"test_gcd_list\", \"test_lcm_list\", \"test_gcd\", \"test_gcd_numbers_vs_polys\", \"test_terms_gcd\", \"test_trunc\", \"test_monic\", \"test_content\", \"test_primitive\", \"test_compose\", \"test_shift\", \"test_transform\", \"test_sturm\", \"test_gff\", \"test_norm\", \"test_sqf_norm\", \"test_sqf\", \"test_factor\", \"test_factor_large\", \"test_factor_noeval\", \"test_intervals\", \"test_refine_root\", \"test_count_roots\", \"test_Poly_root\", \"test_real_roots\", \"test_all_roots\", \"test_nroots\", \"test_ground_roots\", \"test_nth_power_roots_poly\", \"test_torational_factor_list\", \"test_cancel\", \"test_reduced\", \"test_groebner\", \"test_fglm\", \"test_is_zero_dimensional\", \"test_GroebnerBasis\", \"test_poly\", \"test_keep_coeff\", \"test_poly_matching_consistency\", \"test_noncommutative\", \"test_to_rational_coeffs\", \"test_as_list\", \"test_issue_11198\", \"test_Poly_precision\", \"test_issue_12400\", \"test_issue_14364\", \"test_issue_15669\", \"test_issue_17988\", \"test_issue_18205\"]", + "expected_spans": { + "sympy/polys/polytools.py": [ + "imports", + "_symbolic_factor_list" + ] + }, + "test_file_spans": { + "sympy/polys/tests/test_polytools.py": [ + "test_factor_terms" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12497", + "repo": "django/django", + "base_commit": "a4881f5e5d7ee38b7e83301331a0b4962845ef8a", + "problem_statement": "Wrong hint about recursive relationship.\nDescription\n\t \n\t\t(last modified by Matheus Cunha Motta)\n\t \nWhen there's more than 2 ForeignKeys in an intermediary model of a m2m field and no through_fields have been set, Django will show an error with the following hint:\nhint=(\n\t'If you want to create a recursive relationship, '\n\t'use ForeignKey(\"%s\", symmetrical=False, through=\"%s\").'\nBut 'symmetrical' and 'through' are m2m keyword arguments, not ForeignKey.\nThis was probably a small mistake where the developer thought ManyToManyField but typed ForeignKey instead. And the symmetrical=False is an outdated requirement to recursive relationships with intermediary model to self, not required since 3.0. 
I'll provide a PR with a proposed correction shortly after.\nEdit: fixed description.\n", + "golden_patch": "diff --git a/django/db/models/fields/related.py b/django/db/models/fields/related.py\n--- a/django/db/models/fields/related.py\n+++ b/django/db/models/fields/related.py\n@@ -1309,7 +1309,7 @@ def _check_relationship_model(self, from_model=None, **kwargs):\n \"through_fields keyword argument.\") % (self, from_model_name),\n hint=(\n 'If you want to create a recursive relationship, '\n- 'use ForeignKey(\"%s\", symmetrical=False, through=\"%s\").'\n+ 'use ManyToManyField(\"%s\", through=\"%s\").'\n ) % (\n RECURSIVE_RELATIONSHIP_CONSTANT,\n relationship_model_name,\n@@ -1329,7 +1329,7 @@ def _check_relationship_model(self, from_model=None, **kwargs):\n \"through_fields keyword argument.\" % (self, to_model_name),\n hint=(\n 'If you want to create a recursive relationship, '\n- 'use ForeignKey(\"%s\", symmetrical=False, through=\"%s\").'\n+ 'use ManyToManyField(\"%s\", through=\"%s\").'\n ) % (\n RECURSIVE_RELATIONSHIP_CONSTANT,\n relationship_model_name,\n", + "test_patch": "diff --git a/tests/invalid_models_tests/test_relative_fields.py b/tests/invalid_models_tests/test_relative_fields.py\n--- a/tests/invalid_models_tests/test_relative_fields.py\n+++ b/tests/invalid_models_tests/test_relative_fields.py\n@@ -128,7 +128,36 @@ class ThroughModel(models.Model):\n ),\n ])\n \n- def test_ambiguous_relationship_model(self):\n+ def test_ambiguous_relationship_model_from(self):\n+ class Person(models.Model):\n+ pass\n+\n+ class Group(models.Model):\n+ field = models.ManyToManyField('Person', through='AmbiguousRelationship')\n+\n+ class AmbiguousRelationship(models.Model):\n+ person = models.ForeignKey(Person, models.CASCADE)\n+ first_group = models.ForeignKey(Group, models.CASCADE, related_name='first')\n+ second_group = models.ForeignKey(Group, models.CASCADE, related_name='second')\n+\n+ field = Group._meta.get_field('field')\n+ self.assertEqual(field.check(from_model=Group), [\n+ Error(\n+ \"The model is used as an intermediate model by \"\n+ \"'invalid_models_tests.Group.field', but it has more than one \"\n+ \"foreign key from 'Group', which is ambiguous. 
You must \"\n+ \"specify which foreign key Django should use via the \"\n+ \"through_fields keyword argument.\",\n+ hint=(\n+ 'If you want to create a recursive relationship, use '\n+ 'ManyToManyField(\"self\", through=\"AmbiguousRelationship\").'\n+ ),\n+ obj=field,\n+ id='fields.E334',\n+ ),\n+ ])\n+\n+ def test_ambiguous_relationship_model_to(self):\n \n class Person(models.Model):\n pass\n@@ -152,7 +181,7 @@ class AmbiguousRelationship(models.Model):\n \"keyword argument.\",\n hint=(\n 'If you want to create a recursive relationship, use '\n- 'ForeignKey(\"self\", symmetrical=False, through=\"AmbiguousRelationship\").'\n+ 'ManyToManyField(\"self\", through=\"AmbiguousRelationship\").'\n ),\n obj=field,\n id='fields.E335',\n", + "fail_to_pass": "[\"test_ambiguous_relationship_model_from (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_ambiguous_relationship_model_to (invalid_models_tests.test_relative_fields.RelativeFieldTests)\"]", + "pass_to_pass": "[\"test_accessor_clash (invalid_models_tests.test_relative_fields.SelfReferentialFKClashTests)\", \"test_clash_under_explicit_related_name (invalid_models_tests.test_relative_fields.SelfReferentialFKClashTests)\", \"test_reverse_query_name_clash (invalid_models_tests.test_relative_fields.SelfReferentialFKClashTests)\", \"test_explicit_field_names (invalid_models_tests.test_relative_fields.M2mThroughFieldsTests)\", \"test_intersection_foreign_object (invalid_models_tests.test_relative_fields.M2mThroughFieldsTests)\", \"test_invalid_field (invalid_models_tests.test_relative_fields.M2mThroughFieldsTests)\", \"test_invalid_order (invalid_models_tests.test_relative_fields.M2mThroughFieldsTests)\", \"test_m2m_field_argument_validation (invalid_models_tests.test_relative_fields.M2mThroughFieldsTests)\", \"test_superset_foreign_object (invalid_models_tests.test_relative_fields.M2mThroughFieldsTests)\", \"test_clash_parent_link (invalid_models_tests.test_relative_fields.ComplexClashTests)\", \"test_complex_clash (invalid_models_tests.test_relative_fields.ComplexClashTests)\", \"test_accessor_clash (invalid_models_tests.test_relative_fields.SelfReferentialM2MClashTests)\", \"test_clash_between_accessors (invalid_models_tests.test_relative_fields.SelfReferentialM2MClashTests)\", \"test_clash_under_explicit_related_name (invalid_models_tests.test_relative_fields.SelfReferentialM2MClashTests)\", \"test_reverse_query_name_clash (invalid_models_tests.test_relative_fields.SelfReferentialM2MClashTests)\", \"test_valid_model (invalid_models_tests.test_relative_fields.SelfReferentialM2MClashTests)\", \"test_fk_to_fk (invalid_models_tests.test_relative_fields.ExplicitRelatedNameClashTests)\", \"test_fk_to_integer (invalid_models_tests.test_relative_fields.ExplicitRelatedNameClashTests)\", \"test_fk_to_m2m (invalid_models_tests.test_relative_fields.ExplicitRelatedNameClashTests)\", \"test_m2m_to_fk (invalid_models_tests.test_relative_fields.ExplicitRelatedNameClashTests)\", \"test_m2m_to_integer (invalid_models_tests.test_relative_fields.ExplicitRelatedNameClashTests)\", \"test_m2m_to_m2m (invalid_models_tests.test_relative_fields.ExplicitRelatedNameClashTests)\", \"test_clash_between_accessors (invalid_models_tests.test_relative_fields.AccessorClashTests)\", \"test_fk_to_fk (invalid_models_tests.test_relative_fields.AccessorClashTests)\", \"test_fk_to_integer (invalid_models_tests.test_relative_fields.AccessorClashTests)\", \"test_fk_to_m2m (invalid_models_tests.test_relative_fields.AccessorClashTests)\", \"test_m2m_to_fk 
(invalid_models_tests.test_relative_fields.AccessorClashTests)\", \"test_m2m_to_integer (invalid_models_tests.test_relative_fields.AccessorClashTests)\", \"test_m2m_to_m2m (invalid_models_tests.test_relative_fields.AccessorClashTests)\", \"Ref #22047.\", \"test_no_clash_for_hidden_related_name (invalid_models_tests.test_relative_fields.AccessorClashTests)\", \"test_fk_to_fk (invalid_models_tests.test_relative_fields.ReverseQueryNameClashTests)\", \"test_fk_to_integer (invalid_models_tests.test_relative_fields.ReverseQueryNameClashTests)\", \"test_fk_to_m2m (invalid_models_tests.test_relative_fields.ReverseQueryNameClashTests)\", \"test_m2m_to_fk (invalid_models_tests.test_relative_fields.ReverseQueryNameClashTests)\", \"test_m2m_to_integer (invalid_models_tests.test_relative_fields.ReverseQueryNameClashTests)\", \"test_m2m_to_m2m (invalid_models_tests.test_relative_fields.ReverseQueryNameClashTests)\", \"test_fk_to_fk (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_fk_to_integer (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_fk_to_m2m (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_hidden_fk_to_fk (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_hidden_fk_to_integer (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_hidden_fk_to_m2m (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_hidden_m2m_to_fk (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_hidden_m2m_to_integer (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_hidden_m2m_to_m2m (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_m2m_to_fk (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_m2m_to_integer (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_m2m_to_m2m (invalid_models_tests.test_relative_fields.ExplicitRelatedQueryNameClashTests)\", \"test_foreign_key_to_abstract_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_foreign_key_to_isolate_apps_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_foreign_key_to_missing_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_foreign_key_to_non_unique_field (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_foreign_key_to_non_unique_field_under_explicit_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_foreign_key_to_partially_unique_field (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_foreign_key_to_unique_field_with_meta_constraint (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_foreign_object_to_non_unique_fields (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_foreign_object_to_partially_unique_field (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_foreign_object_to_unique_field_with_meta_constraint (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_invalid_related_query_name (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_m2m_to_abstract_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_many_to_many_through_isolate_apps_model 
(invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_many_to_many_to_isolate_apps_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_many_to_many_to_missing_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_many_to_many_with_limit_choices_auto_created_no_warning (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_many_to_many_with_useless_options (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_missing_relationship_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_missing_relationship_model_on_model_check (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_not_swapped_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_nullable_primary_key (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_on_delete_set_default_without_default_value (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_on_delete_set_null_on_non_nullable_field (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_referencing_to_swapped_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_related_field_has_invalid_related_name (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_related_field_has_valid_related_name (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_relationship_model_missing_foreign_key (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_relationship_model_with_foreign_key_to_wrong_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_to_fields_exist (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_to_fields_not_checked_if_related_model_doesnt_exist (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_too_many_foreign_keys_in_self_referential_model (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_unique_m2m (invalid_models_tests.test_relative_fields.RelativeFieldTests)\", \"test_valid_foreign_key_without_accessor (invalid_models_tests.test_relative_fields.RelativeFieldTests)\"]", + "expected_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "test_file_spans": { + "tests/invalid_models_tests/test_relative_fields.py": [ + "RelativeFieldTests.test_ambiguous_relationship_model" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + 
"alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/models/fields/related.py": [ + "imports", + "resolve_relation", + "RelatedField", + "RelatedField.related_model", + "RelatedField.check", + "RelatedField._check_related_name_is_valid", + "RelatedField._check_related_query_name_is_valid", + "RelatedField._check_relation_model_exists", + "RelatedField._check_referencing_to_swapped_model", + "RelatedField._check_clashes", + "RelatedField.db_type", + "RelatedField.contribute_to_class", + "RelatedField.get_forward_related_filter", + "RelatedField.get_reverse_related_filter", + "RelatedField.swappable_setting", + "RelatedField.set_attributes_from_rel", + "RelatedField.formfield", + "RelatedField.related_query_name", + "RelatedField.target_field", + "ForeignObject", + "ForeignObject.__init__", + "ForeignObject.check", + "ForeignObject._check_to_fields_exist", + "ForeignObject._check_unique_target", + "ForeignObject.deconstruct", + "ForeignObject.resolve_related_fields", + "ForeignObject.reverse_related_fields", + "ForeignObject.foreign_related_fields", + "ForeignObject.get_local_related_value", + "ForeignObject.get_foreign_related_value", + "ForeignObject.get_instance_value_for_fields", + "ForeignObject.get_joining_columns", + "ForeignObject.get_path_info", + "ForeignObject.get_reverse_path_info", + "ForeignObject.get_lookups", + "ForeignObject.contribute_to_class", + "ForeignObject.contribute_to_related_class", + "ForeignKey", + "ForeignKey.__init__", + "ForeignKey.check", + "ForeignKey._check_on_delete", + "ForeignKey._check_unique", + "ForeignKey.deconstruct", + "ForeignKey.get_reverse_path_info", + "ForeignKey.validate", + "ForeignKey.resolve_related_fields", + "ForeignKey.get_db_prep_save", + "ForeignKey.formfield", + "ForeignKey.db_parameters", + "ForeignKey.convert_empty_strings", + "OneToOneField", + "OneToOneField.deconstruct", + "OneToOneField.save_form_data", + "OneToOneField._check_unique", + "create_many_to_many_intermediary_model", + "ManyToManyField", + "ManyToManyField.__init__", + "ManyToManyField.check", + "ManyToManyField._check_unique", + "ManyToManyField._check_ignored_options", + "ManyToManyField._check_relationship_model", + "ManyToManyField._check_table_uniqueness", + "ManyToManyField.deconstruct", + "ManyToManyField._get_path_info", + "ManyToManyField.get_path_info", + "ManyToManyField.get_reverse_path_info", + "ManyToManyField._get_m2m_db_table", + "ManyToManyField._get_m2m_attr", + "ManyToManyField._get_m2m_reverse_attr", + "ManyToManyField.contribute_to_class", + "ManyToManyField.contribute_to_related_class", + "ManyToManyField.formfield", + "ManyToManyField.db_type" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "imports", + "resolve_relation", + "RelatedField", + "RelatedField.related_model", + "RelatedField.check", + "RelatedField._check_related_name_is_valid", + "RelatedField._check_related_query_name_is_valid", + "RelatedField._check_relation_model_exists", + "RelatedField._check_referencing_to_swapped_model", + "RelatedField._check_clashes", + "RelatedField.db_type", + 
"RelatedField.contribute_to_class", + "RelatedField.get_forward_related_filter", + "RelatedField.get_reverse_related_filter", + "RelatedField.swappable_setting", + "RelatedField.set_attributes_from_rel", + "RelatedField.formfield", + "RelatedField.related_query_name", + "RelatedField.target_field", + "ForeignObject", + "ForeignObject.__init__", + "ForeignObject.check", + "ForeignObject._check_to_fields_exist", + "ForeignObject._check_unique_target", + "ForeignObject.deconstruct", + "ForeignObject.resolve_related_fields", + "ForeignObject.reverse_related_fields", + "ForeignObject.foreign_related_fields", + "ForeignObject.get_local_related_value", + "ForeignObject.get_foreign_related_value", + "ForeignObject.get_instance_value_for_fields", + "ForeignObject.get_joining_columns", + "ForeignObject.get_path_info", + "ForeignObject.get_reverse_path_info", + "ForeignObject.get_lookups", + "ForeignObject.contribute_to_class", + "ForeignObject.contribute_to_related_class", + "ForeignKey", + "ForeignKey.__init__", + "ForeignKey.check", + "ForeignKey._check_on_delete", + "ForeignKey._check_unique", + "ForeignKey.deconstruct", + "ForeignKey.get_reverse_path_info", + "ForeignKey.validate", + "ForeignKey.resolve_related_fields", + "ForeignKey.get_db_prep_save", + "ForeignKey.formfield", + "ForeignKey.db_parameters", + "ForeignKey.convert_empty_strings", + "OneToOneField", + "OneToOneField.deconstruct", + "OneToOneField.save_form_data", + "OneToOneField._check_unique", + "create_many_to_many_intermediary_model", + "ManyToManyField", + "ManyToManyField.__init__", + "ManyToManyField.check", + "ManyToManyField._check_unique", + "ManyToManyField._check_ignored_options", + "ManyToManyField._check_relationship_model", + "ManyToManyField._check_table_uniqueness", + "ManyToManyField.deconstruct", + "ManyToManyField._get_path_info", + "ManyToManyField.get_path_info", + "ManyToManyField.get_reverse_path_info", + "ManyToManyField._get_m2m_db_table", + "ManyToManyField._get_m2m_attr", + "ManyToManyField._get_m2m_reverse_attr", + "ManyToManyField.contribute_to_class", + "ManyToManyField.contribute_to_related_class", + "ManyToManyField.formfield", + "ManyToManyField.db_type" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + 
"ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + }, + "alternative_spans": { + "django/db/models/fields/related.py": [ + "ManyToManyField._check_relationship_model" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-18835", + "repo": "sympy/sympy", + "base_commit": "516fa83e69caf1e68306cfc912a13f36c434d51c", + "problem_statement": "uniq modifies list argument\nWhen you iterate over a dictionary or set and try to modify 
it while doing so you get an error from Python:\r\n```python\r\n>>> multiset('THISTLE')\r\n{'T': 2, 'H': 1, 'I': 1, 'S': 1, 'L': 1, 'E': 1}\r\n>>> for i in _:\r\n... _.pop(i)\r\n...\r\n2\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\nRuntimeError: dictionary changed size during iteration\r\n```\r\nIt would be good to do the same thing from within `uniq` because the output will silently be wrong if you modify a passed list:\r\n```python\r\n>>> f=list('THISTLE')\r\n>>> for i in uniq(f):\r\n... f.remove(i)\r\n... i\r\n...\r\n'T'\r\n'I'\r\n'L'\r\n```\r\nI think this would entail recording the size at the start and then checking the size and raising a similar RuntimeError if the size changes.\n", + "golden_patch": "diff --git a/sympy/utilities/iterables.py b/sympy/utilities/iterables.py\n--- a/sympy/utilities/iterables.py\n+++ b/sympy/utilities/iterables.py\n@@ -2088,8 +2088,13 @@ def has_variety(seq):\n def uniq(seq, result=None):\n \"\"\"\n Yield unique elements from ``seq`` as an iterator. The second\n- parameter ``result`` is used internally; it is not necessary to pass\n- anything for this.\n+ parameter ``result`` is used internally; it is not necessary\n+ to pass anything for this.\n+\n+ Note: changing the sequence during iteration will raise a\n+ RuntimeError if the size of the sequence is known; if you pass\n+ an iterator and advance the iterator you will change the\n+ output of this routine but there will be no warning.\n \n Examples\n ========\n@@ -2106,15 +2111,27 @@ def uniq(seq, result=None):\n >>> list(uniq([[1], [2, 1], [1]]))\n [[1], [2, 1]]\n \"\"\"\n+ try:\n+ n = len(seq)\n+ except TypeError:\n+ n = None\n+ def check():\n+ # check that size of seq did not change during iteration;\n+ # if n == None the object won't support size changing, e.g.\n+ # an iterator can't be changed\n+ if n is not None and len(seq) != n:\n+ raise RuntimeError('sequence changed size during iteration')\n try:\n seen = set()\n result = result or []\n for i, s in enumerate(seq):\n if not (s in seen or seen.add(s)):\n yield s\n+ check()\n except TypeError:\n if s not in result:\n yield s\n+ check()\n result.append(s)\n if hasattr(seq, '__getitem__'):\n for s in uniq(seq[i + 1:], result):\n", + "test_patch": "diff --git a/sympy/utilities/tests/test_iterables.py b/sympy/utilities/tests/test_iterables.py\n--- a/sympy/utilities/tests/test_iterables.py\n+++ b/sympy/utilities/tests/test_iterables.py\n@@ -703,6 +703,10 @@ def test_uniq():\n [([1], 2, 2), (2, [1], 2), (2, 2, [1])]\n assert list(uniq([2, 3, 2, 4, [2], [1], [2], [3], [1]])) == \\\n [2, 3, 4, [2], [1], [3]]\n+ f = [1]\n+ raises(RuntimeError, lambda: [f.remove(i) for i in uniq(f)])\n+ f = [[1]]\n+ raises(RuntimeError, lambda: [f.remove(i) for i in uniq(f)])\n \n \n def test_kbins():\n", + "fail_to_pass": "[\"test_uniq\"]", + "pass_to_pass": "[\"test_is_palindromic\", \"test_postorder_traversal\", \"test_flatten\", \"test_iproduct\", \"test_group\", \"test_subsets\", \"test_variations\", \"test_cartes\", \"test_filter_symbols\", \"test_numbered_symbols\", \"test_sift\", \"test_take\", \"test_dict_merge\", \"test_prefixes\", \"test_postfixes\", \"test_topological_sort\", \"test_strongly_connected_components\", \"test_connected_components\", \"test_rotate\", \"test_multiset_partitions\", \"test_multiset_combinations\", \"test_multiset_permutations\", \"test_partitions\", \"test_binary_partitions\", \"test_bell_perm\", \"test_involutions\", \"test_derangements\", \"test_necklaces\", \"test_bracelets\", 
\"test_generate_oriented_forest\", \"test_unflatten\", \"test_common_prefix_suffix\", \"test_minlex\", \"test_ordered\", \"test_runs\", \"test_reshape\", \"test_kbins\", \"test_has_dups\", \"test__partition\", \"test_ordered_partitions\", \"test_rotations\"]", + "expected_spans": { + "sympy/utilities/iterables.py": [ + "uniq" + ] + }, + "test_file_spans": { + "sympy/utilities/tests/test_iterables.py": [ + "test_uniq" + ] + }, + "resolved_by": [ + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "uniq" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "uniq" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12589", + "repo": "django/django", + "base_commit": "895f28f9cbed817c00ab68770433170d83132d90", + "problem_statement": "Django 3.0: \"GROUP BY\" clauses error with tricky field annotation\nDescription\n\t\nLet's pretend that we have next model structure with next model's relations:\nclass A(models.Model):\n\tbs = models.ManyToManyField('B',\n\t\t\t\t\t\t\t\trelated_name=\"a\",\n\t\t\t\t\t\t\t\tthrough=\"AB\")\nclass B(models.Model):\n\tpass\nclass AB(models.Model):\n\ta = models.ForeignKey(A, on_delete=models.CASCADE, related_name=\"ab_a\")\n\tb = models.ForeignKey(B, on_delete=models.CASCADE, related_name=\"ab_b\")\n\tstatus = models.IntegerField()\nclass C(models.Model):\n\ta = models.ForeignKey(\n\t\tA,\n\t\tnull=True,\n\t\tblank=True,\n\t\ton_delete=models.SET_NULL,\n\t\trelated_name=\"c\",\n\t\tverbose_name=_(\"a\")\n\t)\n\tstatus = models.IntegerField()\nLet's try to evaluate next query\nab_query = AB.objects.filter(a=OuterRef(\"pk\"), b=1)\nfilter_conditions = Q(pk=1) | Q(ab_a__b=1)\nquery = A.objects.\\\n\tfilter(filter_conditions).\\\n\tannotate(\n\t\tstatus=Subquery(ab_query.values(\"status\")),\n\t\tc_count=Count(\"c\"),\n)\nanswer = query.values(\"status\").annotate(total_count=Count(\"status\"))\nprint(answer.query)\nprint(answer)\nOn Django 3.0.4 we have an error\ndjango.db.utils.ProgrammingError: column reference \"status\" is ambiguous\nand query is next:\nSELECT (SELECT U0.\"status\" FROM \"test_app_ab\" U0 WHERE (U0.\"a_id\" = \"test_app_a\".\"id\" AND U0.\"b_id\" = 1)) AS \"status\", COUNT((SELECT U0.\"status\" FROM \"test_app_ab\" U0 WHERE (U0.\"a_id\" = \"test_app_a\".\"id\" AND U0.\"b_id\" = 1))) AS \"total_count\" FROM \"test_app_a\" LEFT OUTER JOIN \"test_app_ab\" ON (\"test_app_a\".\"id\" = \"test_app_ab\".\"a_id\") LEFT OUTER JOIN \"test_app_c\" ON (\"test_app_a\".\"id\" = \"test_app_c\".\"a_id\") WHERE (\"test_app_a\".\"id\" = 1 OR \"test_app_ab\".\"b_id\" = 1) GROUP BY \"status\"\nHowever, Django 2.2.11 processed this query properly with the next query:\nSELECT (SELECT U0.\"status\" FROM \"test_app_ab\" U0 WHERE (U0.\"a_id\" = (\"test_app_a\".\"id\") AND U0.\"b_id\" = 1)) AS \"status\", COUNT((SELECT U0.\"status\" FROM \"test_app_ab\" U0 WHERE (U0.\"a_id\" = (\"test_app_a\".\"id\") AND U0.\"b_id\" = 1))) AS \"total_count\" FROM \"test_app_a\" LEFT OUTER JOIN \"test_app_ab\" ON (\"test_app_a\".\"id\" = \"test_app_ab\".\"a_id\") LEFT OUTER JOIN \"test_app_c\" ON (\"test_app_a\".\"id\" = \"test_app_c\".\"a_id\") WHERE (\"test_app_a\".\"id\" = 1 OR \"test_app_ab\".\"b_id\" = 1) GROUP BY (SELECT U0.\"status\" FROM \"test_app_ab\" U0 WHERE (U0.\"a_id\" = (\"test_app_a\".\"id\") AND U0.\"b_id\" = 1))\nso, the difference in \"GROUP BY\" clauses\n(as DB provider uses \"django.db.backends.postgresql\", postgresql 11)\n", + "golden_patch": "diff --git 
a/django/db/models/sql/query.py b/django/db/models/sql/query.py\n--- a/django/db/models/sql/query.py\n+++ b/django/db/models/sql/query.py\n@@ -1927,6 +1927,19 @@ def set_group_by(self, allow_aliases=True):\n primary key, and the query would be equivalent, the optimization\n will be made automatically.\n \"\"\"\n+ # Column names from JOINs to check collisions with aliases.\n+ if allow_aliases:\n+ column_names = set()\n+ seen_models = set()\n+ for join in list(self.alias_map.values())[1:]: # Skip base table.\n+ model = join.join_field.related_model\n+ if model not in seen_models:\n+ column_names.update({\n+ field.column\n+ for field in model._meta.local_concrete_fields\n+ })\n+ seen_models.add(model)\n+\n group_by = list(self.select)\n if self.annotation_select:\n for alias, annotation in self.annotation_select.items():\n@@ -1940,7 +1953,7 @@ def set_group_by(self, allow_aliases=True):\n warnings.warn(msg, category=RemovedInDjango40Warning)\n group_by_cols = annotation.get_group_by_cols()\n else:\n- if not allow_aliases:\n+ if not allow_aliases or alias in column_names:\n alias = None\n group_by_cols = annotation.get_group_by_cols(alias=alias)\n group_by.extend(group_by_cols)\n", + "test_patch": "diff --git a/tests/aggregation/models.py b/tests/aggregation/models.py\n--- a/tests/aggregation/models.py\n+++ b/tests/aggregation/models.py\n@@ -5,6 +5,7 @@ class Author(models.Model):\n name = models.CharField(max_length=100)\n age = models.IntegerField()\n friends = models.ManyToManyField('self', blank=True)\n+ rating = models.FloatField(null=True)\n \n def __str__(self):\n return self.name\ndiff --git a/tests/aggregation/tests.py b/tests/aggregation/tests.py\n--- a/tests/aggregation/tests.py\n+++ b/tests/aggregation/tests.py\n@@ -1191,6 +1191,22 @@ def test_aggregation_subquery_annotation_values(self):\n },\n ])\n \n+ def test_aggregation_subquery_annotation_values_collision(self):\n+ books_rating_qs = Book.objects.filter(\n+ publisher=OuterRef('pk'),\n+ price=Decimal('29.69'),\n+ ).values('rating')\n+ publisher_qs = Publisher.objects.filter(\n+ book__contact__age__gt=20,\n+ name=self.p1.name,\n+ ).annotate(\n+ rating=Subquery(books_rating_qs),\n+ contacts_count=Count('book__contact'),\n+ ).values('rating').annotate(total_count=Count('rating'))\n+ self.assertEqual(list(publisher_qs), [\n+ {'rating': 4.0, 'total_count': 2},\n+ ])\n+\n @skipUnlessDBFeature('supports_subqueries_in_group_by')\n @skipIf(\n connection.vendor == 'mysql' and 'ONLY_FULL_GROUP_BY' in connection.sql_mode,\n", + "fail_to_pass": "[\"test_aggregation_subquery_annotation_values_collision (aggregation.tests.AggregateTestCase)\"]", + "pass_to_pass": "[\"test_add_implementation (aggregation.tests.AggregateTestCase)\", \"test_aggregate_alias (aggregation.tests.AggregateTestCase)\", \"test_aggregate_annotation (aggregation.tests.AggregateTestCase)\", \"test_aggregate_in_order_by (aggregation.tests.AggregateTestCase)\", \"test_aggregate_multi_join (aggregation.tests.AggregateTestCase)\", \"test_aggregate_over_complex_annotation (aggregation.tests.AggregateTestCase)\", \"test_aggregation_exists_annotation (aggregation.tests.AggregateTestCase)\", \"test_aggregation_expressions (aggregation.tests.AggregateTestCase)\", \"test_aggregation_order_by_not_selected_annotation_values (aggregation.tests.AggregateTestCase)\", \"Subquery annotations are excluded from the GROUP BY if they are\", \"test_aggregation_subquery_annotation_exists (aggregation.tests.AggregateTestCase)\", \"test_aggregation_subquery_annotation_multivalued 
(aggregation.tests.AggregateTestCase)\", \"test_aggregation_subquery_annotation_related_field (aggregation.tests.AggregateTestCase)\", \"test_aggregation_subquery_annotation_values (aggregation.tests.AggregateTestCase)\", \"test_annotate_basic (aggregation.tests.AggregateTestCase)\", \"test_annotate_defer (aggregation.tests.AggregateTestCase)\", \"test_annotate_defer_select_related (aggregation.tests.AggregateTestCase)\", \"test_annotate_m2m (aggregation.tests.AggregateTestCase)\", \"test_annotate_ordering (aggregation.tests.AggregateTestCase)\", \"test_annotate_over_annotate (aggregation.tests.AggregateTestCase)\", \"test_annotate_values (aggregation.tests.AggregateTestCase)\", \"test_annotate_values_aggregate (aggregation.tests.AggregateTestCase)\", \"test_annotate_values_list (aggregation.tests.AggregateTestCase)\", \"test_annotated_aggregate_over_annotated_aggregate (aggregation.tests.AggregateTestCase)\", \"test_annotation (aggregation.tests.AggregateTestCase)\", \"test_annotation_expressions (aggregation.tests.AggregateTestCase)\", \"test_arguments_must_be_expressions (aggregation.tests.AggregateTestCase)\", \"test_avg_decimal_field (aggregation.tests.AggregateTestCase)\", \"test_avg_duration_field (aggregation.tests.AggregateTestCase)\", \"test_backwards_m2m_annotate (aggregation.tests.AggregateTestCase)\", \"test_combine_different_types (aggregation.tests.AggregateTestCase)\", \"test_complex_aggregations_require_kwarg (aggregation.tests.AggregateTestCase)\", \"test_complex_values_aggregation (aggregation.tests.AggregateTestCase)\", \"test_count (aggregation.tests.AggregateTestCase)\", \"test_count_distinct_expression (aggregation.tests.AggregateTestCase)\", \"test_count_star (aggregation.tests.AggregateTestCase)\", \"test_dates_with_aggregation (aggregation.tests.AggregateTestCase)\", \"test_decimal_max_digits_has_no_effect (aggregation.tests.AggregateTestCase)\", \"test_distinct_on_aggregate (aggregation.tests.AggregateTestCase)\", \"test_empty_aggregate (aggregation.tests.AggregateTestCase)\", \"test_even_more_aggregate (aggregation.tests.AggregateTestCase)\", \"test_expression_on_aggregation (aggregation.tests.AggregateTestCase)\", \"test_filter_aggregate (aggregation.tests.AggregateTestCase)\", \"test_filtering (aggregation.tests.AggregateTestCase)\", \"test_fkey_aggregate (aggregation.tests.AggregateTestCase)\", \"test_group_by_exists_annotation (aggregation.tests.AggregateTestCase)\", \"test_group_by_subquery_annotation (aggregation.tests.AggregateTestCase)\", \"test_grouped_annotation_in_group_by (aggregation.tests.AggregateTestCase)\", \"test_missing_output_field_raises_error (aggregation.tests.AggregateTestCase)\", \"test_more_aggregation (aggregation.tests.AggregateTestCase)\", \"test_multi_arg_aggregate (aggregation.tests.AggregateTestCase)\", \"test_multiple_aggregates (aggregation.tests.AggregateTestCase)\", \"test_non_grouped_annotation_not_in_group_by (aggregation.tests.AggregateTestCase)\", \"test_nonaggregate_aggregation_throws (aggregation.tests.AggregateTestCase)\", \"test_nonfield_annotation (aggregation.tests.AggregateTestCase)\", \"test_order_of_precedence (aggregation.tests.AggregateTestCase)\", \"test_related_aggregate (aggregation.tests.AggregateTestCase)\", \"test_reverse_fkey_annotate (aggregation.tests.AggregateTestCase)\", \"test_single_aggregate (aggregation.tests.AggregateTestCase)\", \"test_sum_distinct_aggregate (aggregation.tests.AggregateTestCase)\", \"test_sum_duration_field (aggregation.tests.AggregateTestCase)\", \"test_ticket11881 
(aggregation.tests.AggregateTestCase)\", \"test_ticket12886 (aggregation.tests.AggregateTestCase)\", \"test_ticket17424 (aggregation.tests.AggregateTestCase)\", \"test_values_aggregation (aggregation.tests.AggregateTestCase)\", \"test_values_annotation_with_expression (aggregation.tests.AggregateTestCase)\"]", + "expected_spans": { + "django/db/models/sql/query.py": [ + "Query.set_group_by" + ] + }, + "test_file_spans": { + "tests/aggregation/models.py": [ + "Author" + ], + "tests/aggregation/tests.py": [ + "AggregateTestCase.test_aggregation_subquery_annotation_multivalued" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-19007", + "repo": "sympy/sympy", + "base_commit": "f9e030b57623bebdc2efa7f297c1b5ede08fcebf", + "problem_statement": "Wrong matrix element fetched from BlockMatrix\nGiven this code:\r\n```\r\nfrom sympy import *\r\nn, i = symbols('n, i', integer=True)\r\nA = MatrixSymbol('A', 1, 1)\r\nB = MatrixSymbol('B', n, 1)\r\nC = BlockMatrix([[A], [B]])\r\nprint('C is')\r\npprint(C)\r\nprint('C[i, 0] is')\r\npprint(C[i, 0])\r\n```\r\nI get this output:\r\n```\r\nC is\r\n\u23a1A\u23a4\r\n\u23a2 \u23a5\r\n\u23a3B\u23a6\r\nC[i, 0] is\r\n(A)[i, 0]\r\n```\r\n`(A)[i, 0]` is the wrong here. `C[i, 0]` should not be simplified as that element may come from either `A` or `B`.\n", + "golden_patch": "diff --git a/sympy/matrices/expressions/blockmatrix.py b/sympy/matrices/expressions/blockmatrix.py\n--- a/sympy/matrices/expressions/blockmatrix.py\n+++ b/sympy/matrices/expressions/blockmatrix.py\n@@ -7,7 +7,7 @@\n from sympy.utilities import sift\n from sympy.utilities.misc import filldedent\n \n-from sympy.matrices.expressions.matexpr import MatrixExpr, ZeroMatrix, Identity\n+from sympy.matrices.expressions.matexpr import MatrixExpr, ZeroMatrix, Identity, MatrixElement\n from sympy.matrices.expressions.matmul import MatMul\n from sympy.matrices.expressions.matadd import MatAdd\n from sympy.matrices.expressions.matpow import MatPow\n@@ -234,16 +234,24 @@ def transpose(self):\n \n def _entry(self, i, j, **kwargs):\n # Find row entry\n+ orig_i, orig_j = i, j\n for row_block, numrows in enumerate(self.rowblocksizes):\n- if (i < numrows) != False:\n+ cmp = i < numrows\n+ if cmp == True:\n break\n- else:\n+ elif cmp == False:\n i -= numrows\n+ elif row_block < self.blockshape[0] - 1:\n+ # Can't tell which block and it's not the last one, return unevaluated\n+ return MatrixElement(self, orig_i, orig_j)\n for col_block, numcols in enumerate(self.colblocksizes):\n- if (j < numcols) != False:\n+ cmp = j < numcols\n+ if cmp == True:\n break\n- else:\n+ elif cmp == False:\n j -= numcols\n+ elif col_block < self.blockshape[1] - 1:\n+ return MatrixElement(self, orig_i, orig_j)\n return self.blocks[row_block, col_block][i, j]\n \n @property\n", + "test_patch": "diff --git a/sympy/matrices/expressions/tests/test_blockmatrix.py b/sympy/matrices/expressions/tests/test_blockmatrix.py\n--- a/sympy/matrices/expressions/tests/test_blockmatrix.py\n+++ b/sympy/matrices/expressions/tests/test_blockmatrix.py\n@@ -192,7 +192,6 @@ def test_BlockDiagMatrix():\n def test_blockcut():\n A = MatrixSymbol('A', n, m)\n B = blockcut(A, (n/2, n/2), (m/2, m/2))\n- assert A[i, j] == B[i, j]\n assert B == BlockMatrix([[A[:n/2, :m/2], A[:n/2, m/2:]],\n [A[n/2:, :m/2], A[n/2:, m/2:]]])\n \ndiff --git a/sympy/matrices/expressions/tests/test_indexing.py b/sympy/matrices/expressions/tests/test_indexing.py\n--- a/sympy/matrices/expressions/tests/test_indexing.py\n+++ 
b/sympy/matrices/expressions/tests/test_indexing.py\n@@ -1,7 +1,7 @@\n from sympy import (symbols, MatrixSymbol, MatPow, BlockMatrix, KroneckerDelta,\n Identity, ZeroMatrix, ImmutableMatrix, eye, Sum, Dummy, trace,\n Symbol)\n-from sympy.testing.pytest import raises\n+from sympy.testing.pytest import raises, XFAIL\n from sympy.matrices.expressions.matexpr import MatrixElement, MatrixExpr\n \n k, l, m, n = symbols('k l m n', integer=True)\n@@ -83,6 +83,72 @@ def test_block_index():\n assert BI.as_explicit().equals(eye(6))\n \n \n+def test_block_index_symbolic():\n+ # Note that these matrices may be zero-sized and indices may be negative, which causes\n+ # all naive simplifications given in the comments to be invalid\n+ A1 = MatrixSymbol('A1', n, k)\n+ A2 = MatrixSymbol('A2', n, l)\n+ A3 = MatrixSymbol('A3', m, k)\n+ A4 = MatrixSymbol('A4', m, l)\n+ A = BlockMatrix([[A1, A2], [A3, A4]])\n+ assert A[0, 0] == MatrixElement(A, 0, 0) # Cannot be A1[0, 0]\n+ assert A[n - 1, k - 1] == A1[n - 1, k - 1]\n+ assert A[n, k] == A4[0, 0]\n+ assert A[n + m - 1, 0] == MatrixElement(A, n + m - 1, 0) # Cannot be A3[m - 1, 0]\n+ assert A[0, k + l - 1] == MatrixElement(A, 0, k + l - 1) # Cannot be A2[0, l - 1]\n+ assert A[n + m - 1, k + l - 1] == MatrixElement(A, n + m - 1, k + l - 1) # Cannot be A4[m - 1, l - 1]\n+ assert A[i, j] == MatrixElement(A, i, j)\n+ assert A[n + i, k + j] == MatrixElement(A, n + i, k + j) # Cannot be A4[i, j]\n+ assert A[n - i - 1, k - j - 1] == MatrixElement(A, n - i - 1, k - j - 1) # Cannot be A1[n - i - 1, k - j - 1]\n+\n+\n+def test_block_index_symbolic_nonzero():\n+ # All invalid simplifications from test_block_index_symbolic() that become valid if all\n+ # matrices have nonzero size and all indices are nonnegative\n+ k, l, m, n = symbols('k l m n', integer=True, positive=True)\n+ i, j = symbols('i j', integer=True, nonnegative=True)\n+ A1 = MatrixSymbol('A1', n, k)\n+ A2 = MatrixSymbol('A2', n, l)\n+ A3 = MatrixSymbol('A3', m, k)\n+ A4 = MatrixSymbol('A4', m, l)\n+ A = BlockMatrix([[A1, A2], [A3, A4]])\n+ assert A[0, 0] == A1[0, 0]\n+ assert A[n + m - 1, 0] == A3[m - 1, 0]\n+ assert A[0, k + l - 1] == A2[0, l - 1]\n+ assert A[n + m - 1, k + l - 1] == A4[m - 1, l - 1]\n+ assert A[i, j] == MatrixElement(A, i, j)\n+ assert A[n + i, k + j] == A4[i, j]\n+ assert A[n - i - 1, k - j - 1] == A1[n - i - 1, k - j - 1]\n+ assert A[2 * n, 2 * k] == A4[n, k]\n+\n+\n+def test_block_index_large():\n+ n, m, k = symbols('n m k', integer=True, positive=True)\n+ i = symbols('i', integer=True, nonnegative=True)\n+ A1 = MatrixSymbol('A1', n, n)\n+ A2 = MatrixSymbol('A2', n, m)\n+ A3 = MatrixSymbol('A3', n, k)\n+ A4 = MatrixSymbol('A4', m, n)\n+ A5 = MatrixSymbol('A5', m, m)\n+ A6 = MatrixSymbol('A6', m, k)\n+ A7 = MatrixSymbol('A7', k, n)\n+ A8 = MatrixSymbol('A8', k, m)\n+ A9 = MatrixSymbol('A9', k, k)\n+ A = BlockMatrix([[A1, A2, A3], [A4, A5, A6], [A7, A8, A9]])\n+ assert A[n + i, n + i] == MatrixElement(A, n + i, n + i)\n+\n+\n+@XFAIL\n+def test_block_index_symbolic_fail():\n+ # To make this work, symbolic matrix dimensions would need to be somehow assumed nonnegative\n+ # even if the symbols aren't specified as such. 
Then 2 * n < n would correctly evaluate to\n+ # False in BlockMatrix._entry()\n+ A1 = MatrixSymbol('A1', n, 1)\n+ A2 = MatrixSymbol('A2', m, 1)\n+ A = BlockMatrix([[A1], [A2]])\n+ assert A[2 * n, 0] == A2[n, 0]\n+\n+\n def test_slicing():\n A.as_explicit()[0, :] # does not raise an error\n \n", + "fail_to_pass": "[\"test_block_index_symbolic\", \"test_block_index_symbolic_nonzero\", \"test_block_index_large\"]", + "pass_to_pass": "[\"test_bc_matmul\", \"test_bc_matadd\", \"test_bc_transpose\", \"test_bc_dist_diag\", \"test_block_plus_ident\", \"test_BlockMatrix\", \"test_block_collapse_explicit_matrices\", \"test_issue_17624\", \"test_issue_18618\", \"test_BlockMatrix_trace\", \"test_BlockMatrix_Determinant\", \"test_squareBlockMatrix\", \"test_BlockDiagMatrix\", \"test_blockcut\", \"test_reblock_2x2\", \"test_deblock\", \"test_symbolic_indexing\", \"test_add_index\", \"test_mul_index\", \"test_pow_index\", \"test_transpose_index\", \"test_Identity_index\", \"test_block_index\", \"test_slicing\", \"test_errors\", \"test_matrix_expression_to_indices\"]", + "expected_spans": { + "sympy/matrices/expressions/blockmatrix.py": [ + "imports", + "BlockMatrix._entry" + ] + }, + "test_file_spans": { + "sympy/matrices/expressions/tests/test_blockmatrix.py": [ + "test_blockcut" + ], + "sympy/matrices/expressions/tests/test_indexing.py": [ + "imports", + "test_slicing" + ] + }, + "resolved_by": [ + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/matrices/expressions/matexpr.py": [ + "MatrixElement.__new__" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12700", + "repo": "django/django", + "base_commit": "d51c50d836c5cf8db5566da17963f871be554615", + "problem_statement": "Settings are cleaned insufficiently.\nDescription\n\t\nPosting publicly after checking with the rest of the security team.\nI just ran into a case where django.views.debug.SafeExceptionReporterFilter.get_safe_settings() would return several un-cleansed values. 
Looking at cleanse_setting() I realized that we \u200bonly take care of `dict`s but don't take other types of iterables into account but \u200breturn them as-is.\nExample:\nIn my settings.py I have this:\nMY_SETTING = {\n\t\"foo\": \"value\",\n\t\"secret\": \"value\",\n\t\"token\": \"value\",\n\t\"something\": [\n\t\t{\"foo\": \"value\"},\n\t\t{\"secret\": \"value\"},\n\t\t{\"token\": \"value\"},\n\t],\n\t\"else\": [\n\t\t[\n\t\t\t{\"foo\": \"value\"},\n\t\t\t{\"secret\": \"value\"},\n\t\t\t{\"token\": \"value\"},\n\t\t],\n\t\t[\n\t\t\t{\"foo\": \"value\"},\n\t\t\t{\"secret\": \"value\"},\n\t\t\t{\"token\": \"value\"},\n\t\t],\n\t]\n}\nOn Django 3.0 and below:\n>>> import pprint\n>>> from django.views.debug import get_safe_settings\n>>> pprint.pprint(get_safe_settings()[\"MY_SETTING\"])\n{'else': [[{'foo': 'value'}, {'secret': 'value'}, {'token': 'value'}],\n\t\t [{'foo': 'value'}, {'secret': 'value'}, {'token': 'value'}]],\n 'foo': 'value',\n 'secret': '********************',\n 'something': [{'foo': 'value'}, {'secret': 'value'}, {'token': 'value'}],\n 'token': '********************'}\nOn Django 3.1 and up:\n>>> from django.views.debug import SafeExceptionReporterFilter\n>>> import pprint\n>>> pprint.pprint(SafeExceptionReporterFilter().get_safe_settings()[\"MY_SETTING\"])\n{'else': [[{'foo': 'value'}, {'secret': 'value'}, {'token': 'value'}],\n\t\t [{'foo': 'value'}, {'secret': 'value'}, {'token': 'value'}]],\n 'foo': 'value',\n 'secret': '********************',\n 'something': [{'foo': 'value'}, {'secret': 'value'}, {'token': 'value'}],\n 'token': '********************'}\n", + "golden_patch": "diff --git a/django/views/debug.py b/django/views/debug.py\n--- a/django/views/debug.py\n+++ b/django/views/debug.py\n@@ -90,6 +90,10 @@ def cleanse_setting(self, key, value):\n cleansed = self.cleansed_substitute\n elif isinstance(value, dict):\n cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}\n+ elif isinstance(value, list):\n+ cleansed = [self.cleanse_setting('', v) for v in value]\n+ elif isinstance(value, tuple):\n+ cleansed = tuple([self.cleanse_setting('', v) for v in value])\n else:\n cleansed = value\n except TypeError:\n", + "test_patch": "diff --git a/tests/view_tests/tests/test_debug.py b/tests/view_tests/tests/test_debug.py\n--- a/tests/view_tests/tests/test_debug.py\n+++ b/tests/view_tests/tests/test_debug.py\n@@ -1249,6 +1249,41 @@ def test_cleanse_setting_recurses_in_dictionary(self):\n {'login': 'cooper', 'password': reporter_filter.cleansed_substitute},\n )\n \n+ def test_cleanse_setting_recurses_in_list_tuples(self):\n+ reporter_filter = SafeExceptionReporterFilter()\n+ initial = [\n+ {\n+ 'login': 'cooper',\n+ 'password': 'secret',\n+ 'apps': (\n+ {'name': 'app1', 'api_key': 'a06b-c462cffae87a'},\n+ {'name': 'app2', 'api_key': 'a9f4-f152e97ad808'},\n+ ),\n+ 'tokens': ['98b37c57-ec62-4e39', '8690ef7d-8004-4916'],\n+ },\n+ {'SECRET_KEY': 'c4d77c62-6196-4f17-a06b-c462cffae87a'},\n+ ]\n+ cleansed = [\n+ {\n+ 'login': 'cooper',\n+ 'password': reporter_filter.cleansed_substitute,\n+ 'apps': (\n+ {'name': 'app1', 'api_key': reporter_filter.cleansed_substitute},\n+ {'name': 'app2', 'api_key': reporter_filter.cleansed_substitute},\n+ ),\n+ 'tokens': reporter_filter.cleansed_substitute,\n+ },\n+ {'SECRET_KEY': reporter_filter.cleansed_substitute},\n+ ]\n+ self.assertEqual(\n+ reporter_filter.cleanse_setting('SETTING_NAME', initial),\n+ cleansed,\n+ )\n+ self.assertEqual(\n+ reporter_filter.cleanse_setting('SETTING_NAME', tuple(initial)),\n+ tuple(cleansed),\n+ )\n+\n 
def test_request_meta_filtering(self):\n request = self.rf.get('/', HTTP_SECRET_HEADER='super_secret')\n reporter_filter = SafeExceptionReporterFilter()\n", + "fail_to_pass": "[\"test_cleanse_setting_recurses_in_list_tuples (view_tests.tests.test_debug.ExceptionReporterFilterTests)\"]", + "pass_to_pass": "[\"test_repr (view_tests.tests.test_debug.CallableSettingWrapperTests)\", \"test_sensitive_post_parameters_not_called (view_tests.tests.test_debug.DecoratorsTests)\", \"test_sensitive_variables_not_called (view_tests.tests.test_debug.DecoratorsTests)\", \"test_cleansed_substitute_override (view_tests.tests.test_debug.CustomExceptionReporterFilterTests)\", \"test_hidden_settings_override (view_tests.tests.test_debug.CustomExceptionReporterFilterTests)\", \"test_setting_allows_custom_subclass (view_tests.tests.test_debug.CustomExceptionReporterFilterTests)\", \"test_handle_db_exception (view_tests.tests.test_debug.DebugViewQueriesAllowedTests)\", \"test_400 (view_tests.tests.test_debug.NonDjangoTemplatesDebugViewTests)\", \"test_403 (view_tests.tests.test_debug.NonDjangoTemplatesDebugViewTests)\", \"test_404 (view_tests.tests.test_debug.NonDjangoTemplatesDebugViewTests)\", \"test_template_not_found_error (view_tests.tests.test_debug.NonDjangoTemplatesDebugViewTests)\", \"An exception report can be generated even for a disallowed host.\", \"test_message_only (view_tests.tests.test_debug.PlainTextReportTests)\", \"An exception report can be generated for just a request\", \"An exception report can be generated without request\", \"A simple exception report can be generated\", \"A message can be provided in addition to a request\", \"test_request_with_items_key (view_tests.tests.test_debug.PlainTextReportTests)\", \"test_template_exception (view_tests.tests.test_debug.PlainTextReportTests)\", \"test_custom_exception_reporter_filter (view_tests.tests.test_debug.NonHTMLResponseExceptionReporterFilter)\", \"test_non_html_response_encoding (view_tests.tests.test_debug.NonHTMLResponseExceptionReporterFilter)\", \"test_non_sensitive_request (view_tests.tests.test_debug.NonHTMLResponseExceptionReporterFilter)\", \"test_paranoid_request (view_tests.tests.test_debug.NonHTMLResponseExceptionReporterFilter)\", \"test_sensitive_request (view_tests.tests.test_debug.NonHTMLResponseExceptionReporterFilter)\", \"test_400 (view_tests.tests.test_debug.DebugViewTests)\", \"test_403 (view_tests.tests.test_debug.DebugViewTests)\", \"test_403_template (view_tests.tests.test_debug.DebugViewTests)\", \"test_404 (view_tests.tests.test_debug.DebugViewTests)\", \"test_404_empty_path_not_in_urls (view_tests.tests.test_debug.DebugViewTests)\", \"test_404_not_in_urls (view_tests.tests.test_debug.DebugViewTests)\", \"test_classbased_technical_404 (view_tests.tests.test_debug.DebugViewTests)\", \"test_default_urlconf_template (view_tests.tests.test_debug.DebugViewTests)\", \"test_exception_reporter_from_request (view_tests.tests.test_debug.DebugViewTests)\", \"test_exception_reporter_from_settings (view_tests.tests.test_debug.DebugViewTests)\", \"test_files (view_tests.tests.test_debug.DebugViewTests)\", \"test_no_template_source_loaders (view_tests.tests.test_debug.DebugViewTests)\", \"test_non_l10ned_numeric_ids (view_tests.tests.test_debug.DebugViewTests)\", \"test_regression_21530 (view_tests.tests.test_debug.DebugViewTests)\", \"test_technical_404 (view_tests.tests.test_debug.DebugViewTests)\", \"test_technical_404_converter_raise_404 (view_tests.tests.test_debug.DebugViewTests)\", \"test_template_encoding 
(view_tests.tests.test_debug.DebugViewTests)\", \"test_template_exceptions (view_tests.tests.test_debug.DebugViewTests)\", \"Tests for not existing file\", \"test_encoding_error (view_tests.tests.test_debug.ExceptionReporterTests)\", \"The ExceptionReporter supports Unix, Windows and Macintosh EOL markers\", \"test_exception_fetching_user (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_ignore_traceback_evaluation_exceptions (view_tests.tests.test_debug.ExceptionReporterTests)\", \"Safe strings in local variables are escaped.\", \"test_message_only (view_tests.tests.test_debug.ExceptionReporterTests)\", \"Non-UTF-8 exceptions/values should not make the output generation choke.\", \"test_reporting_frames_for_cyclic_reference (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_reporting_frames_source_not_match (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_reporting_frames_without_source (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_reporting_of_nested_exceptions (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_request_with_items_key (view_tests.tests.test_debug.ExceptionReporterTests)\", \"test_template_encoding (view_tests.tests.test_debug.ExceptionReporterTests)\", \"Large values should not create a large HTML.\", \"test_unfrozen_importlib (view_tests.tests.test_debug.ExceptionReporterTests)\", \"Unprintable values should not make the output generation choke.\", \"test_callable_settings (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_callable_settings_forbidding_to_set_attributes (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_cleanse_setting_basic (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_cleanse_setting_ignore_case (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_cleanse_setting_recurses_in_dictionary (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_custom_exception_reporter_filter (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_dict_setting_with_non_str_key (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_exception_report_uses_meta_filtering (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_multivalue_dict_key_error (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_non_sensitive_request (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_paranoid_request (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_request_meta_filtering (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_function_arguments (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_function_keyword_arguments (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_method (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_request (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_sensitive_settings (view_tests.tests.test_debug.ExceptionReporterFilterTests)\", \"test_settings_with_sensitive_keys (view_tests.tests.test_debug.ExceptionReporterFilterTests)\"]", + "expected_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "test_file_spans": { + "tests/view_tests/tests/test_debug.py": [ + "ExceptionReporterFilterTests.test_request_meta_filtering" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/views/debug.py": [ + 
"SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/views/debug.py": [ + "imports", + "CallableSettingWrapper.__init__", + "technical_500_response", + "get_default_exception_reporter_filter", + "get_exception_reporter_class", + "SafeExceptionReporterFilter", + "SafeExceptionReporterFilter.cleanse_setting", + "SafeExceptionReporterFilter.get_safe_settings", + "SafeExceptionReporterFilter.get_cleansed_multivaluedict", + "SafeExceptionReporterFilter.get_post_parameters", + "SafeExceptionReporterFilter.cleanse_special_types", + "SafeExceptionReporterFilter.get_traceback_frame_variables", + "ExceptionReporter", + "ExceptionReporter.__init__", + "ExceptionReporter.get_traceback_data", + "ExceptionReporter.get_traceback_html", + "ExceptionReporter.get_traceback_text", + "ExceptionReporter._get_lines_from_file", + "ExceptionReporter.get_traceback_frames", + "technical_404_response", + "default_urlconf" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "imports", + "CallableSettingWrapper.__init__", + "technical_500_response", + "get_default_exception_reporter_filter", + "get_exception_reporter_class", + "SafeExceptionReporterFilter", + "SafeExceptionReporterFilter.cleanse_setting", + "SafeExceptionReporterFilter.get_safe_settings", + "SafeExceptionReporterFilter.get_cleansed_multivaluedict", + "SafeExceptionReporterFilter.get_post_parameters", + 
"SafeExceptionReporterFilter.cleanse_special_types", + "SafeExceptionReporterFilter.get_traceback_frame_variables", + "ExceptionReporter", + "ExceptionReporter.__init__", + "ExceptionReporter.get_traceback_data", + "ExceptionReporter.get_traceback_html", + "ExceptionReporter.get_traceback_text", + "ExceptionReporter._get_lines_from_file", + "ExceptionReporter.get_traceback_frames", + "technical_404_response", + "default_urlconf" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ], + "tests/view_tests/tests/test_debug.py": [ + "imports", + "User", + "ExceptionReportTestMixin", + "ExceptionReporterFilterTests" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", 
+ "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + }, + "alternative_spans": { + "django/views/debug.py": [ + "SafeExceptionReporterFilter.cleanse_setting" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12708", + "repo": "django/django", + "base_commit": "447980e72ac01da1594dd3373a03ba40b7ee6f80", + "problem_statement": "Migration crashes deleting an index_together if there is a unique_together on the same fields\nDescription\n\t\nHappens with Django 1.11.10\nSteps to reproduce:\n1) Create models with 2 fields, add 2 same fields to unique_together and to index_together\n2) Delete index_together -> Fail\nIt will fail at django/db/backends/base/schema.py, line 378, in _delete_composed_index(), ValueError: Found wrong number (2) of constraints for as this one will find two constraints, the _uniq and the _idx one. No way to get out of this...\nThe worst in my case is that happened as I wanted to refactor my code to use the \"new\" (Dj 1.11) Options.indexes feature. I am actually not deleting the index, just the way it is declared in my code.\nI think there are 2 different points here:\n1) The deletion of index_together should be possible alone or made coherent (migrations side?) 
with unique_together\n2) Moving the declaration of an index should not result in an index re-creation\n", + "golden_patch": "diff --git a/django/db/backends/base/schema.py b/django/db/backends/base/schema.py\n--- a/django/db/backends/base/schema.py\n+++ b/django/db/backends/base/schema.py\n@@ -393,7 +393,12 @@ def alter_index_together(self, model, old_index_together, new_index_together):\n news = {tuple(fields) for fields in new_index_together}\n # Deleted indexes\n for fields in olds.difference(news):\n- self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)\n+ self._delete_composed_index(\n+ model,\n+ fields,\n+ {'index': True, 'unique': False},\n+ self.sql_delete_index,\n+ )\n # Created indexes\n for field_names in news.difference(olds):\n fields = [model._meta.get_field(field) for field in field_names]\n", + "test_patch": "diff --git a/tests/migrations/test_base.py b/tests/migrations/test_base.py\n--- a/tests/migrations/test_base.py\n+++ b/tests/migrations/test_base.py\n@@ -62,7 +62,11 @@ def assertIndexExists(self, table, columns, value=True, using='default', index_t\n any(\n c[\"index\"]\n for c in connections[using].introspection.get_constraints(cursor, table).values()\n- if c['columns'] == list(columns) and (index_type is None or c['type'] == index_type)\n+ if (\n+ c['columns'] == list(columns) and\n+ (index_type is None or c['type'] == index_type) and\n+ not c['unique']\n+ )\n ),\n )\n \n@@ -80,6 +84,14 @@ def assertConstraintExists(self, table, name, value=True, using='default'):\n def assertConstraintNotExists(self, table, name):\n return self.assertConstraintExists(table, name, False)\n \n+ def assertUniqueConstraintExists(self, table, columns, value=True, using='default'):\n+ with connections[using].cursor() as cursor:\n+ constraints = connections[using].introspection.get_constraints(cursor, table).values()\n+ self.assertEqual(\n+ value,\n+ any(c['unique'] for c in constraints if c['columns'] == list(columns)),\n+ )\n+\n def assertFKExists(self, table, columns, to, value=True, using='default'):\n with connections[using].cursor() as cursor:\n self.assertEqual(\ndiff --git a/tests/migrations/test_operations.py b/tests/migrations/test_operations.py\n--- a/tests/migrations/test_operations.py\n+++ b/tests/migrations/test_operations.py\n@@ -1759,6 +1759,29 @@ def test_alter_index_together_remove(self):\n operation = migrations.AlterIndexTogether(\"Pony\", None)\n self.assertEqual(operation.describe(), \"Alter index_together for Pony (0 constraint(s))\")\n \n+ @skipUnlessDBFeature('allows_multiple_constraints_on_same_fields')\n+ def test_alter_index_together_remove_with_unique_together(self):\n+ app_label = 'test_alintoremove_wunto'\n+ table_name = '%s_pony' % app_label\n+ project_state = self.set_up_test_model(app_label, unique_together=True)\n+ self.assertUniqueConstraintExists(table_name, ['pink', 'weight'])\n+ # Add index together.\n+ new_state = project_state.clone()\n+ operation = migrations.AlterIndexTogether('Pony', [('pink', 'weight')])\n+ operation.state_forwards(app_label, new_state)\n+ with connection.schema_editor() as editor:\n+ operation.database_forwards(app_label, editor, project_state, new_state)\n+ self.assertIndexExists(table_name, ['pink', 'weight'])\n+ # Remove index together.\n+ project_state = new_state\n+ new_state = project_state.clone()\n+ operation = migrations.AlterIndexTogether('Pony', set())\n+ operation.state_forwards(app_label, new_state)\n+ with connection.schema_editor() as editor:\n+ 
operation.database_forwards(app_label, editor, project_state, new_state)\n+ self.assertIndexNotExists(table_name, ['pink', 'weight'])\n+ self.assertUniqueConstraintExists(table_name, ['pink', 'weight'])\n+\n @skipUnlessDBFeature('supports_table_check_constraints')\n def test_add_constraint(self):\n project_state = self.set_up_test_model(\"test_addconstraint\")\n", + "fail_to_pass": "[\"test_alter_index_together_remove_with_unique_together (migrations.test_operations.OperationTests)\"]", + "pass_to_pass": "[\"test_references_model_mixin (migrations.test_operations.TestCreateModel)\", \"test_reference_field_by_through_fields (migrations.test_operations.FieldOperationTests)\", \"test_references_field_by_from_fields (migrations.test_operations.FieldOperationTests)\", \"test_references_field_by_name (migrations.test_operations.FieldOperationTests)\", \"test_references_field_by_remote_field_model (migrations.test_operations.FieldOperationTests)\", \"test_references_field_by_through (migrations.test_operations.FieldOperationTests)\", \"test_references_field_by_to_fields (migrations.test_operations.FieldOperationTests)\", \"test_references_model (migrations.test_operations.FieldOperationTests)\", \"test_add_field_ignore_swapped (migrations.test_operations.SwappableOperationTests)\", \"test_create_ignore_swapped (migrations.test_operations.SwappableOperationTests)\", \"test_delete_ignore_swapped (migrations.test_operations.SwappableOperationTests)\", \"test_indexes_ignore_swapped (migrations.test_operations.SwappableOperationTests)\", \"test_add_binaryfield (migrations.test_operations.OperationTests)\", \"test_add_charfield (migrations.test_operations.OperationTests)\", \"test_add_constraint (migrations.test_operations.OperationTests)\", \"test_add_constraint_combinable (migrations.test_operations.OperationTests)\", \"test_add_constraint_percent_escaping (migrations.test_operations.OperationTests)\", \"test_add_field (migrations.test_operations.OperationTests)\", \"test_add_field_m2m (migrations.test_operations.OperationTests)\", \"test_add_field_preserve_default (migrations.test_operations.OperationTests)\", \"test_add_index (migrations.test_operations.OperationTests)\", \"test_add_index_state_forwards (migrations.test_operations.OperationTests)\", \"test_add_or_constraint (migrations.test_operations.OperationTests)\", \"test_add_partial_unique_constraint (migrations.test_operations.OperationTests)\", \"test_add_textfield (migrations.test_operations.OperationTests)\", \"test_alter_field (migrations.test_operations.OperationTests)\", \"test_alter_field_m2m (migrations.test_operations.OperationTests)\", \"test_alter_field_pk (migrations.test_operations.OperationTests)\", \"test_alter_field_pk_fk (migrations.test_operations.OperationTests)\", \"test_alter_field_reloads_state_on_fk_target_changes (migrations.test_operations.OperationTests)\", \"test_alter_field_reloads_state_on_fk_with_to_field_related_name_target_type_change (migrations.test_operations.OperationTests)\", \"test_alter_field_reloads_state_on_fk_with_to_field_target_changes (migrations.test_operations.OperationTests)\", \"test_alter_field_reloads_state_on_fk_with_to_field_target_type_change (migrations.test_operations.OperationTests)\", \"test_alter_field_with_index (migrations.test_operations.OperationTests)\", \"test_alter_fk (migrations.test_operations.OperationTests)\", \"test_alter_fk_non_fk (migrations.test_operations.OperationTests)\", \"test_alter_index_together (migrations.test_operations.OperationTests)\", 
\"test_alter_index_together_remove (migrations.test_operations.OperationTests)\", \"test_alter_model_managers (migrations.test_operations.OperationTests)\", \"test_alter_model_managers_emptying (migrations.test_operations.OperationTests)\", \"test_alter_model_options (migrations.test_operations.OperationTests)\", \"test_alter_model_options_emptying (migrations.test_operations.OperationTests)\", \"test_alter_model_table (migrations.test_operations.OperationTests)\", \"test_alter_model_table_m2m (migrations.test_operations.OperationTests)\", \"test_alter_model_table_none (migrations.test_operations.OperationTests)\", \"test_alter_model_table_noop (migrations.test_operations.OperationTests)\", \"test_alter_order_with_respect_to (migrations.test_operations.OperationTests)\", \"test_alter_unique_together (migrations.test_operations.OperationTests)\", \"test_alter_unique_together_remove (migrations.test_operations.OperationTests)\", \"A field may be migrated from AutoField to BigAutoField.\", \"test_column_name_quoting (migrations.test_operations.OperationTests)\", \"test_create_model (migrations.test_operations.OperationTests)\", \"test_create_model_inheritance (migrations.test_operations.OperationTests)\", \"test_create_model_m2m (migrations.test_operations.OperationTests)\", \"test_create_model_managers (migrations.test_operations.OperationTests)\", \"test_create_model_with_constraint (migrations.test_operations.OperationTests)\", \"test_create_model_with_duplicate_base (migrations.test_operations.OperationTests)\", \"test_create_model_with_duplicate_field_name (migrations.test_operations.OperationTests)\", \"test_create_model_with_duplicate_manager_name (migrations.test_operations.OperationTests)\", \"test_create_model_with_partial_unique_constraint (migrations.test_operations.OperationTests)\", \"test_create_model_with_unique_after (migrations.test_operations.OperationTests)\", \"test_create_proxy_model (migrations.test_operations.OperationTests)\", \"test_create_unmanaged_model (migrations.test_operations.OperationTests)\", \"test_delete_model (migrations.test_operations.OperationTests)\", \"test_delete_mti_model (migrations.test_operations.OperationTests)\", \"test_delete_proxy_model (migrations.test_operations.OperationTests)\", \"test_model_with_bigautofield (migrations.test_operations.OperationTests)\", \"test_remove_constraint (migrations.test_operations.OperationTests)\", \"test_remove_field (migrations.test_operations.OperationTests)\", \"test_remove_field_m2m (migrations.test_operations.OperationTests)\", \"test_remove_field_m2m_with_through (migrations.test_operations.OperationTests)\", \"test_remove_fk (migrations.test_operations.OperationTests)\", \"test_remove_index (migrations.test_operations.OperationTests)\", \"test_remove_index_state_forwards (migrations.test_operations.OperationTests)\", \"test_remove_partial_unique_constraint (migrations.test_operations.OperationTests)\", \"test_rename_field (migrations.test_operations.OperationTests)\", \"test_rename_field_reloads_state_on_fk_target_changes (migrations.test_operations.OperationTests)\", \"RenameModel renames a many-to-many column after a RenameField.\", \"test_rename_m2m_target_model (migrations.test_operations.OperationTests)\", \"test_rename_m2m_through_model (migrations.test_operations.OperationTests)\", \"test_rename_missing_field (migrations.test_operations.OperationTests)\", \"test_rename_model (migrations.test_operations.OperationTests)\", \"test_rename_model_state_forwards 
(migrations.test_operations.OperationTests)\", \"test_rename_model_with_m2m (migrations.test_operations.OperationTests)\", \"test_rename_model_with_self_referential_fk (migrations.test_operations.OperationTests)\", \"test_rename_model_with_self_referential_m2m (migrations.test_operations.OperationTests)\", \"test_rename_model_with_superclass_fk (migrations.test_operations.OperationTests)\", \"test_rename_referenced_field_state_forward (migrations.test_operations.OperationTests)\", \"test_repoint_field_m2m (migrations.test_operations.OperationTests)\", \"test_run_python (migrations.test_operations.OperationTests)\", \"test_run_python_atomic (migrations.test_operations.OperationTests)\", \"test_run_python_noop (migrations.test_operations.OperationTests)\", \"test_run_python_related_assignment (migrations.test_operations.OperationTests)\", \"test_run_sql (migrations.test_operations.OperationTests)\", \"test_run_sql_noop (migrations.test_operations.OperationTests)\", \"test_run_sql_params (migrations.test_operations.OperationTests)\", \"test_run_sql_params_invalid (migrations.test_operations.OperationTests)\", \"test_separate_database_and_state (migrations.test_operations.OperationTests)\", \"test_separate_database_and_state2 (migrations.test_operations.OperationTests)\", \"A field may be migrated from SmallAutoField to AutoField.\", \"A field may be migrated from SmallAutoField to BigAutoField.\"]", + "expected_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor.alter_index_together" + ] + }, + "test_file_spans": { + "tests/migrations/test_base.py": [ + "MigrationTestBase.assertIndexExists", + "MigrationTestBase.assertFKExists" + ], + "tests/migrations/test_operations.py": [ + "OperationTests.test_add_constraint" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor.alter_index_together" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor.alter_index_together" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "imports", + "_is_relevant_relation", + "_all_related_fields", + "_related_non_m2m_objects", + "BaseDatabaseSchemaEditor", + "BaseDatabaseSchemaEditor.__init__", + "BaseDatabaseSchemaEditor.__exit__", + "BaseDatabaseSchemaEditor.execute", + "BaseDatabaseSchemaEditor.table_sql", + "BaseDatabaseSchemaEditor.column_sql", + "BaseDatabaseSchemaEditor.prepare_default", + "BaseDatabaseSchemaEditor._effective_default", + "BaseDatabaseSchemaEditor.effective_default", + "BaseDatabaseSchemaEditor.quote_value", + "BaseDatabaseSchemaEditor.create_model", + "BaseDatabaseSchemaEditor.delete_model", + "BaseDatabaseSchemaEditor.alter_unique_together", + 
"BaseDatabaseSchemaEditor.alter_index_together", + "BaseDatabaseSchemaEditor._delete_composed_index", + "BaseDatabaseSchemaEditor.alter_db_table", + "BaseDatabaseSchemaEditor.alter_db_tablespace", + "BaseDatabaseSchemaEditor.add_field", + "BaseDatabaseSchemaEditor.remove_field", + "BaseDatabaseSchemaEditor.alter_field", + "BaseDatabaseSchemaEditor._alter_field", + "BaseDatabaseSchemaEditor._alter_column_null_sql", + "BaseDatabaseSchemaEditor._alter_column_default_sql", + "BaseDatabaseSchemaEditor._alter_column_type_sql", + "BaseDatabaseSchemaEditor._alter_many_to_many", + "BaseDatabaseSchemaEditor._create_index_name", + "BaseDatabaseSchemaEditor._create_index_sql", + "BaseDatabaseSchemaEditor._delete_index_sql", + "BaseDatabaseSchemaEditor._index_columns", + "BaseDatabaseSchemaEditor._model_indexes_sql", + "BaseDatabaseSchemaEditor._unique_should_be_added", + "BaseDatabaseSchemaEditor._rename_field_sql", + "BaseDatabaseSchemaEditor._create_fk_sql", + "BaseDatabaseSchemaEditor._fk_constraint_name", + "BaseDatabaseSchemaEditor._delete_fk_sql", + "BaseDatabaseSchemaEditor._unique_sql", + "BaseDatabaseSchemaEditor._create_unique_sql", + "BaseDatabaseSchemaEditor._delete_unique_sql", + "BaseDatabaseSchemaEditor._check_sql", + "BaseDatabaseSchemaEditor._create_check_sql", + "BaseDatabaseSchemaEditor._delete_check_sql", + "BaseDatabaseSchemaEditor._delete_constraint_sql", + "BaseDatabaseSchemaEditor._constraint_names", + "BaseDatabaseSchemaEditor._delete_primary_key", + "BaseDatabaseSchemaEditor._create_primary_key_sql", + "BaseDatabaseSchemaEditor._delete_primary_key_sql", + "BaseDatabaseSchemaEditor.remove_procedure" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "imports", + "_is_relevant_relation", + "_all_related_fields", + "_related_non_m2m_objects", + "BaseDatabaseSchemaEditor", + "BaseDatabaseSchemaEditor.__init__", + "BaseDatabaseSchemaEditor.__exit__", + "BaseDatabaseSchemaEditor.execute", + "BaseDatabaseSchemaEditor.table_sql", + "BaseDatabaseSchemaEditor.column_sql", + "BaseDatabaseSchemaEditor.prepare_default", + "BaseDatabaseSchemaEditor._effective_default", + "BaseDatabaseSchemaEditor.effective_default", + "BaseDatabaseSchemaEditor.quote_value", + "BaseDatabaseSchemaEditor.create_model", + "BaseDatabaseSchemaEditor.delete_model", + "BaseDatabaseSchemaEditor.alter_unique_together", + "BaseDatabaseSchemaEditor.alter_index_together", + "BaseDatabaseSchemaEditor._delete_composed_index", + "BaseDatabaseSchemaEditor.alter_db_table", + "BaseDatabaseSchemaEditor.alter_db_tablespace", + "BaseDatabaseSchemaEditor.add_field", + "BaseDatabaseSchemaEditor.remove_field", + "BaseDatabaseSchemaEditor.alter_field", + "BaseDatabaseSchemaEditor._alter_field", + "BaseDatabaseSchemaEditor._alter_column_null_sql", + "BaseDatabaseSchemaEditor._alter_column_default_sql", + "BaseDatabaseSchemaEditor._alter_column_type_sql", + "BaseDatabaseSchemaEditor._alter_many_to_many", + "BaseDatabaseSchemaEditor._create_index_name", + "BaseDatabaseSchemaEditor._create_index_sql", + "BaseDatabaseSchemaEditor._delete_index_sql", + "BaseDatabaseSchemaEditor._index_columns", + "BaseDatabaseSchemaEditor._model_indexes_sql", + "BaseDatabaseSchemaEditor._unique_should_be_added", + "BaseDatabaseSchemaEditor._rename_field_sql", + "BaseDatabaseSchemaEditor._create_fk_sql", + "BaseDatabaseSchemaEditor._fk_constraint_name", + "BaseDatabaseSchemaEditor._delete_fk_sql", + "BaseDatabaseSchemaEditor._unique_sql", + "BaseDatabaseSchemaEditor._create_unique_sql", + 
"BaseDatabaseSchemaEditor._delete_unique_sql", + "BaseDatabaseSchemaEditor._check_sql", + "BaseDatabaseSchemaEditor._create_check_sql", + "BaseDatabaseSchemaEditor._delete_check_sql", + "BaseDatabaseSchemaEditor._delete_constraint_sql", + "BaseDatabaseSchemaEditor._constraint_names", + "BaseDatabaseSchemaEditor._delete_primary_key", + "BaseDatabaseSchemaEditor._create_primary_key_sql", + "BaseDatabaseSchemaEditor._delete_primary_key_sql", + "BaseDatabaseSchemaEditor.remove_procedure" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor.alter_index_together", + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor.alter_index_together", + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ], + "django/db/migrations/operations/models.py": [ + "RemoveIndex.database_forwards" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + 
}, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index", + "BaseDatabaseSchemaEditor.alter_db_table" + ], + "tests/migrations/test_migrations_squashed_complex/6_auto.py": [ + "imports", + "Migration" + ] + }, + "alternative_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index", + "BaseDatabaseSchemaEditor.alter_db_table" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240808_RepoGraph_gpt4o", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240604_CodeR", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240728_sweagent_gpt4o", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240811_gru", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor._delete_composed_index", + "BaseDatabaseSchemaEditor.alter_db_table" + ] + } + } + ] + }, + { + "instance_id": "django__django-12747", + "repo": "django/django", + "base_commit": "c86201b6ed4f8256b0a0520c08aa674f623d4127", + "problem_statement": "QuerySet.Delete - inconsistent result when zero objects deleted\nDescription\n\t\nThe result format of the QuerySet.Delete method is a tuple: (X, Y) \nX - is the total amount of deleted objects (including foreign key deleted 
objects)\nY - is a dictionary specifying counters of deleted objects for each specific model (the key is the _meta.label of the model and the value is counter of deleted objects of this model).\nExample: : (2, {'my_app.FileAccess': 1, 'my_app.File': 1})\nWhen there are zero objects to delete in total - the result is inconsistent:\nFor models with foreign keys - the result will be: : (0, {})\nFor \"simple\" models without foreign key - the result will be: : (0, {'my_app.BlockLibrary': 0})\nI would expect there will be no difference between the two cases: Either both will have the empty dictionary OR both will have dictionary with model-label keys and zero value.\n", + "golden_patch": "diff --git a/django/db/models/deletion.py b/django/db/models/deletion.py\n--- a/django/db/models/deletion.py\n+++ b/django/db/models/deletion.py\n@@ -408,7 +408,8 @@ def delete(self):\n # fast deletes\n for qs in self.fast_deletes:\n count = qs._raw_delete(using=self.using)\n- deleted_counter[qs.model._meta.label] += count\n+ if count:\n+ deleted_counter[qs.model._meta.label] += count\n \n # update fields\n for model, instances_for_fieldvalues in self.field_updates.items():\n@@ -426,7 +427,8 @@ def delete(self):\n query = sql.DeleteQuery(model)\n pk_list = [obj.pk for obj in instances]\n count = query.delete_batch(pk_list, self.using)\n- deleted_counter[model._meta.label] += count\n+ if count:\n+ deleted_counter[model._meta.label] += count\n \n if not model._meta.auto_created:\n for obj in instances:\n", + "test_patch": "diff --git a/tests/delete/tests.py b/tests/delete/tests.py\n--- a/tests/delete/tests.py\n+++ b/tests/delete/tests.py\n@@ -522,11 +522,10 @@ def test_queryset_delete_returns_num_rows(self):\n existed_objs = {\n R._meta.label: R.objects.count(),\n HiddenUser._meta.label: HiddenUser.objects.count(),\n- A._meta.label: A.objects.count(),\n- MR._meta.label: MR.objects.count(),\n HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),\n }\n deleted, deleted_objs = R.objects.all().delete()\n+ self.assertCountEqual(deleted_objs.keys(), existed_objs.keys())\n for k, v in existed_objs.items():\n self.assertEqual(deleted_objs[k], v)\n \n@@ -550,13 +549,13 @@ def test_model_delete_returns_num_rows(self):\n existed_objs = {\n R._meta.label: R.objects.count(),\n HiddenUser._meta.label: HiddenUser.objects.count(),\n- A._meta.label: A.objects.count(),\n MR._meta.label: MR.objects.count(),\n HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),\n M.m2m.through._meta.label: M.m2m.through.objects.count(),\n }\n deleted, deleted_objs = r.delete()\n self.assertEqual(deleted, sum(existed_objs.values()))\n+ self.assertCountEqual(deleted_objs.keys(), existed_objs.keys())\n for k, v in existed_objs.items():\n self.assertEqual(deleted_objs[k], v)\n \n@@ -694,7 +693,7 @@ def test_fast_delete_empty_no_update_can_self_select(self):\n with self.assertNumQueries(1):\n self.assertEqual(\n User.objects.filter(avatar__desc='missing').delete(),\n- (0, {'delete.User': 0})\n+ (0, {}),\n )\n \n def test_fast_delete_combined_relationships(self):\n", + "fail_to_pass": "[\"test_fast_delete_empty_no_update_can_self_select (delete.tests.FastDeleteTests)\", \"test_model_delete_returns_num_rows (delete.tests.DeletionTests)\", \"test_queryset_delete_returns_num_rows (delete.tests.DeletionTests)\"]", + "pass_to_pass": "[\"test_fast_delete_combined_relationships (delete.tests.FastDeleteTests)\", \"test_fast_delete_fk (delete.tests.FastDeleteTests)\", \"test_fast_delete_inheritance (delete.tests.FastDeleteTests)\", 
\"test_fast_delete_instance_set_pk_none (delete.tests.FastDeleteTests)\", \"test_fast_delete_joined_qs (delete.tests.FastDeleteTests)\", \"test_fast_delete_large_batch (delete.tests.FastDeleteTests)\", \"test_fast_delete_m2m (delete.tests.FastDeleteTests)\", \"test_fast_delete_qs (delete.tests.FastDeleteTests)\", \"test_fast_delete_revm2m (delete.tests.FastDeleteTests)\", \"test_auto (delete.tests.OnDeleteTests)\", \"test_auto_nullable (delete.tests.OnDeleteTests)\", \"test_cascade (delete.tests.OnDeleteTests)\", \"test_cascade_from_child (delete.tests.OnDeleteTests)\", \"test_cascade_from_parent (delete.tests.OnDeleteTests)\", \"test_cascade_nullable (delete.tests.OnDeleteTests)\", \"test_do_nothing (delete.tests.OnDeleteTests)\", \"test_do_nothing_qscount (delete.tests.OnDeleteTests)\", \"test_inheritance_cascade_down (delete.tests.OnDeleteTests)\", \"test_inheritance_cascade_up (delete.tests.OnDeleteTests)\", \"test_non_callable (delete.tests.OnDeleteTests)\", \"test_o2o_setnull (delete.tests.OnDeleteTests)\", \"test_protect (delete.tests.OnDeleteTests)\", \"test_protect_multiple (delete.tests.OnDeleteTests)\", \"test_protect_path (delete.tests.OnDeleteTests)\", \"test_restrict (delete.tests.OnDeleteTests)\", \"test_restrict_gfk_no_fast_delete (delete.tests.OnDeleteTests)\", \"test_restrict_multiple (delete.tests.OnDeleteTests)\", \"test_restrict_path_cascade_direct (delete.tests.OnDeleteTests)\", \"test_restrict_path_cascade_indirect (delete.tests.OnDeleteTests)\", \"test_restrict_path_cascade_indirect_diamond (delete.tests.OnDeleteTests)\", \"test_setdefault (delete.tests.OnDeleteTests)\", \"test_setdefault_none (delete.tests.OnDeleteTests)\", \"test_setnull (delete.tests.OnDeleteTests)\", \"test_setnull_from_child (delete.tests.OnDeleteTests)\", \"test_setnull_from_parent (delete.tests.OnDeleteTests)\", \"test_setvalue (delete.tests.OnDeleteTests)\", \"test_bulk (delete.tests.DeletionTests)\", \"test_can_defer_constraint_checks (delete.tests.DeletionTests)\", \"test_delete_with_keeping_parents (delete.tests.DeletionTests)\", \"test_delete_with_keeping_parents_relationships (delete.tests.DeletionTests)\", \"test_deletion_order (delete.tests.DeletionTests)\", \"test_hidden_related (delete.tests.DeletionTests)\", \"test_instance_update (delete.tests.DeletionTests)\", \"test_large_delete (delete.tests.DeletionTests)\", \"test_large_delete_related (delete.tests.DeletionTests)\", \"test_m2m (delete.tests.DeletionTests)\", \"test_only_referenced_fields_selected (delete.tests.DeletionTests)\", \"test_proxied_model_duplicate_queries (delete.tests.DeletionTests)\", \"test_relational_post_delete_signals_happen_before_parent_object (delete.tests.DeletionTests)\"]", + "expected_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "test_file_spans": { + "tests/delete/tests.py": [ + "DeletionTests.test_queryset_delete_returns_num_rows", + "DeletionTests.test_model_delete_returns_num_rows", + "FastDeleteTests.test_fast_delete_empty_no_update_can_self_select" + ] + }, + "resolved_by": [ + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete", + "Collector" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete", + "Collector" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete" + ] + } + }, + { + 
"name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/deletion.py": [ + "Collector.delete", + "Collector" + ] + }, + "alternative_spans": { + "django/db/models/deletion.py": [ + "Collector.delete", + "Collector" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-19254", + "repo": "sympy/sympy", + "base_commit": "e0ef1da13e2ab2a77866c05246f73c871ca9388c", + "problem_statement": "sympy.polys.factortools.dmp_zz_mignotte_bound improvement\nThe method `dup_zz_mignotte_bound(f, K)` can be significantly improved by using the **Knuth-Cohen bound** instead. After our research with Prof. Ag.Akritas we have implemented the Knuth-Cohen bound among others, and compare them among dozens of polynomials with different degree, density and coefficients range. Considering the results and the feedback from Mr.Kalevi Suominen, our proposal is that the mignotte_bound should be replaced by the knuth-cohen bound.\r\nAlso, `dmp_zz_mignotte_bound(f, u, K)` for mutli-variants polynomials should be replaced appropriately.\n", + "golden_patch": "diff --git a/sympy/polys/factortools.py b/sympy/polys/factortools.py\n--- a/sympy/polys/factortools.py\n+++ b/sympy/polys/factortools.py\n@@ -124,13 +124,64 @@ def dmp_trial_division(f, factors, u, K):\n \n \n def dup_zz_mignotte_bound(f, K):\n- \"\"\"Mignotte bound for univariate polynomials in `K[x]`. \"\"\"\n- a = dup_max_norm(f, K)\n- b = abs(dup_LC(f, K))\n- n = dup_degree(f)\n+ \"\"\"\n+ The Knuth-Cohen variant of Mignotte bound for\n+ univariate polynomials in `K[x]`.\n \n- return K.sqrt(K(n + 1))*2**n*a*b\n+ Examples\n+ ========\n+\n+ >>> from sympy.polys import ring, ZZ\n+ >>> R, x = ring(\"x\", ZZ)\n+\n+ >>> f = x**3 + 14*x**2 + 56*x + 64\n+ >>> R.dup_zz_mignotte_bound(f)\n+ 152\n+\n+ By checking `factor(f)` we can see that max coeff is 8\n+\n+ Also consider a case that `f` is irreducible for example `f = 2*x**2 + 3*x + 4`\n+ To avoid a bug for these cases, we return the bound plus the max coefficient of `f`\n+\n+ >>> f = 2*x**2 + 3*x + 4\n+ >>> R.dup_zz_mignotte_bound(f)\n+ 6\n+\n+ Lastly,To see the difference between the new and the old Mignotte bound\n+ consider the irreducible polynomial::\n+\n+ >>> f = 87*x**7 + 4*x**6 + 80*x**5 + 17*x**4 + 9*x**3 + 12*x**2 + 49*x + 26\n+ >>> R.dup_zz_mignotte_bound(f)\n+ 744\n+\n+ The new Mignotte bound is 744 whereas the old one (SymPy 1.5.1) is 1937664.\n+\n+\n+ References\n+ ==========\n+\n+ ..[1] [Abbott2013]_\n+\n+ \"\"\"\n+ from sympy import binomial\n+\n+ d = dup_degree(f)\n+ delta = _ceil(d / 2)\n+ delta2 = _ceil(delta / 2)\n+\n+ # euclidean-norm\n+ eucl_norm = K.sqrt( sum( [cf**2 for cf in f] ) )\n+\n+ # biggest values of binomial coefficients (p. 538 of reference)\n+ t1 = binomial(delta - 1, delta2)\n+ t2 = binomial(delta - 1, delta2 - 1)\n+\n+ lc = K.abs(dup_LC(f, K)) # leading coefficient\n+ bound = t1 * eucl_norm + t2 * lc # (p. 538 of reference)\n+ bound += dup_max_norm(f, K) # add max coeff for irreducible polys\n+ bound = _ceil(bound / 2) * 2 # round up to even integer\n \n+ return bound\n \n def dmp_zz_mignotte_bound(f, u, K):\n \"\"\"Mignotte bound for multivariate polynomials in `K[X]`. 
\"\"\"\n", + "test_patch": "diff --git a/sympy/polys/tests/test_factortools.py b/sympy/polys/tests/test_factortools.py\n--- a/sympy/polys/tests/test_factortools.py\n+++ b/sympy/polys/tests/test_factortools.py\n@@ -27,7 +27,8 @@ def test_dmp_trial_division():\n \n def test_dup_zz_mignotte_bound():\n R, x = ring(\"x\", ZZ)\n- assert R.dup_zz_mignotte_bound(2*x**2 + 3*x + 4) == 32\n+ assert R.dup_zz_mignotte_bound(2*x**2 + 3*x + 4) == 6\n+ assert R.dup_zz_mignotte_bound(x**3 + 14*x**2 + 56*x + 64) == 152\n \n \n def test_dmp_zz_mignotte_bound():\n", + "fail_to_pass": "[\"test_dup_zz_mignotte_bound\"]", + "pass_to_pass": "[\"test_dup_trial_division\", \"test_dmp_trial_division\", \"test_dmp_zz_mignotte_bound\", \"test_dup_zz_hensel_step\", \"test_dup_zz_hensel_lift\", \"test_dup_zz_irreducible_p\", \"test_dup_cyclotomic_p\", \"test_dup_zz_cyclotomic_poly\", \"test_dup_zz_cyclotomic_factor\", \"test_dup_zz_factor\", \"test_dmp_zz_wang\", \"test_issue_6355\", \"test_dmp_zz_factor\", \"test_dup_ext_factor\", \"test_dmp_ext_factor\", \"test_dup_factor_list\", \"test_dmp_factor_list\", \"test_dup_irreducible_p\"]", + "expected_spans": { + "sympy/polys/factortools.py": [ + "dup_zz_mignotte_bound" + ] + }, + "test_file_spans": { + "sympy/polys/tests/test_factortools.py": [ + "test_dup_zz_mignotte_bound" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12856", + "repo": "django/django", + "base_commit": "8328811f048fed0dd22573224def8c65410c9f2e", + "problem_statement": "Add check for fields of UniqueConstraints.\nDescription\n\t \n\t\t(last modified by Marnanel Thurman)\n\t \nWhen a model gains a UniqueConstraint, makemigrations doesn't check that the fields named therein actually exist.\nThis is in contrast to the older unique_together syntax, which raises models.E012 if the fields don't exist.\nIn the attached demonstration, you'll need to uncomment \"with_unique_together\" in settings.py in order to show that unique_together raises E012.\n", + "golden_patch": "diff --git a/django/db/models/base.py b/django/db/models/base.py\n--- a/django/db/models/base.py\n+++ b/django/db/models/base.py\n@@ -1926,6 +1926,12 @@ def _check_constraints(cls, databases):\n id='models.W038',\n )\n )\n+ fields = (\n+ field\n+ for constraint in cls._meta.constraints if isinstance(constraint, UniqueConstraint)\n+ for field in constraint.fields\n+ )\n+ errors.extend(cls._check_local_fields(fields, 'constraints'))\n return errors\n \n \n", + "test_patch": "diff --git a/tests/invalid_models_tests/test_models.py b/tests/invalid_models_tests/test_models.py\n--- a/tests/invalid_models_tests/test_models.py\n+++ b/tests/invalid_models_tests/test_models.py\n@@ -1501,3 +1501,70 @@ class Meta:\n ]\n \n self.assertEqual(Model.check(databases=self.databases), [])\n+\n+ def test_unique_constraint_pointing_to_missing_field(self):\n+ class Model(models.Model):\n+ class Meta:\n+ constraints = [models.UniqueConstraint(fields=['missing_field'], name='name')]\n+\n+ self.assertEqual(Model.check(databases=self.databases), [\n+ Error(\n+ \"'constraints' refers to the nonexistent field \"\n+ \"'missing_field'.\",\n+ obj=Model,\n+ id='models.E012',\n+ ),\n+ ])\n+\n+ def test_unique_constraint_pointing_to_m2m_field(self):\n+ class Model(models.Model):\n+ m2m = models.ManyToManyField('self')\n+\n+ class Meta:\n+ constraints = [models.UniqueConstraint(fields=['m2m'], name='name')]\n+\n+ self.assertEqual(Model.check(databases=self.databases), [\n+ Error(\n+ \"'constraints' refers to a ManyToManyField 
'm2m', but \"\n+ \"ManyToManyFields are not permitted in 'constraints'.\",\n+ obj=Model,\n+ id='models.E013',\n+ ),\n+ ])\n+\n+ def test_unique_constraint_pointing_to_non_local_field(self):\n+ class Parent(models.Model):\n+ field1 = models.IntegerField()\n+\n+ class Child(Parent):\n+ field2 = models.IntegerField()\n+\n+ class Meta:\n+ constraints = [\n+ models.UniqueConstraint(fields=['field2', 'field1'], name='name'),\n+ ]\n+\n+ self.assertEqual(Child.check(databases=self.databases), [\n+ Error(\n+ \"'constraints' refers to field 'field1' which is not local to \"\n+ \"model 'Child'.\",\n+ hint='This issue may be caused by multi-table inheritance.',\n+ obj=Child,\n+ id='models.E016',\n+ ),\n+ ])\n+\n+ def test_unique_constraint_pointing_to_fk(self):\n+ class Target(models.Model):\n+ pass\n+\n+ class Model(models.Model):\n+ fk_1 = models.ForeignKey(Target, models.CASCADE, related_name='target_1')\n+ fk_2 = models.ForeignKey(Target, models.CASCADE, related_name='target_2')\n+\n+ class Meta:\n+ constraints = [\n+ models.UniqueConstraint(fields=['fk_1_id', 'fk_2'], name='name'),\n+ ]\n+\n+ self.assertEqual(Model.check(databases=self.databases), [])\n", + "fail_to_pass": "[\"test_unique_constraint_pointing_to_m2m_field (invalid_models_tests.test_models.ConstraintsTests)\", \"test_unique_constraint_pointing_to_missing_field (invalid_models_tests.test_models.ConstraintsTests)\", \"test_unique_constraint_pointing_to_non_local_field (invalid_models_tests.test_models.ConstraintsTests)\"]", + "pass_to_pass": "[\"test_check_jsonfield (invalid_models_tests.test_models.JSONFieldTests)\", \"test_check_jsonfield_required_db_features (invalid_models_tests.test_models.JSONFieldTests)\", \"test_ordering_pointing_to_json_field_value (invalid_models_tests.test_models.JSONFieldTests)\", \"test_db_column_clash (invalid_models_tests.test_models.FieldNamesTests)\", \"test_ending_with_underscore (invalid_models_tests.test_models.FieldNamesTests)\", \"test_including_separator (invalid_models_tests.test_models.FieldNamesTests)\", \"test_pk (invalid_models_tests.test_models.FieldNamesTests)\", \"test_list_containing_non_iterable (invalid_models_tests.test_models.UniqueTogetherTests)\", \"test_non_iterable (invalid_models_tests.test_models.UniqueTogetherTests)\", \"test_non_list (invalid_models_tests.test_models.UniqueTogetherTests)\", \"test_pointing_to_fk (invalid_models_tests.test_models.UniqueTogetherTests)\", \"test_pointing_to_m2m (invalid_models_tests.test_models.UniqueTogetherTests)\", \"test_pointing_to_missing_field (invalid_models_tests.test_models.UniqueTogetherTests)\", \"test_valid_model (invalid_models_tests.test_models.UniqueTogetherTests)\", \"test_list_containing_non_iterable (invalid_models_tests.test_models.IndexTogetherTests)\", \"test_non_iterable (invalid_models_tests.test_models.IndexTogetherTests)\", \"test_non_list (invalid_models_tests.test_models.IndexTogetherTests)\", \"test_pointing_to_fk (invalid_models_tests.test_models.IndexTogetherTests)\", \"test_pointing_to_m2m_field (invalid_models_tests.test_models.IndexTogetherTests)\", \"test_pointing_to_missing_field (invalid_models_tests.test_models.IndexTogetherTests)\", \"test_pointing_to_non_local_field (invalid_models_tests.test_models.IndexTogetherTests)\", \"test_field_name_clash_with_child_accessor (invalid_models_tests.test_models.ShadowingFieldsTests)\", \"test_id_clash (invalid_models_tests.test_models.ShadowingFieldsTests)\", \"test_inheritance_clash (invalid_models_tests.test_models.ShadowingFieldsTests)\", 
\"test_multigeneration_inheritance (invalid_models_tests.test_models.ShadowingFieldsTests)\", \"test_multiinheritance_clash (invalid_models_tests.test_models.ShadowingFieldsTests)\", \"test_index_with_condition (invalid_models_tests.test_models.IndexesTests)\", \"test_index_with_condition_required_db_features (invalid_models_tests.test_models.IndexesTests)\", \"test_max_name_length (invalid_models_tests.test_models.IndexesTests)\", \"test_name_constraints (invalid_models_tests.test_models.IndexesTests)\", \"test_pointing_to_fk (invalid_models_tests.test_models.IndexesTests)\", \"test_pointing_to_m2m_field (invalid_models_tests.test_models.IndexesTests)\", \"test_pointing_to_missing_field (invalid_models_tests.test_models.IndexesTests)\", \"test_pointing_to_non_local_field (invalid_models_tests.test_models.IndexesTests)\", \"test_check_constraints (invalid_models_tests.test_models.ConstraintsTests)\", \"test_check_constraints_required_db_features (invalid_models_tests.test_models.ConstraintsTests)\", \"test_deferrable_unique_constraint (invalid_models_tests.test_models.ConstraintsTests)\", \"test_deferrable_unique_constraint_required_db_features (invalid_models_tests.test_models.ConstraintsTests)\", \"test_unique_constraint_pointing_to_fk (invalid_models_tests.test_models.ConstraintsTests)\", \"test_unique_constraint_with_condition (invalid_models_tests.test_models.ConstraintsTests)\", \"test_unique_constraint_with_condition_required_db_features (invalid_models_tests.test_models.ConstraintsTests)\", \"test_just_order_with_respect_to_no_errors (invalid_models_tests.test_models.OtherModelTests)\", \"test_just_ordering_no_errors (invalid_models_tests.test_models.OtherModelTests)\", \"test_lazy_reference_checks (invalid_models_tests.test_models.OtherModelTests)\", \"test_m2m_autogenerated_table_name_clash (invalid_models_tests.test_models.OtherModelTests)\", \"test_m2m_autogenerated_table_name_clash_database_routers_installed (invalid_models_tests.test_models.OtherModelTests)\", \"test_m2m_field_table_name_clash (invalid_models_tests.test_models.OtherModelTests)\", \"test_m2m_field_table_name_clash_database_routers_installed (invalid_models_tests.test_models.OtherModelTests)\", \"test_m2m_table_name_clash (invalid_models_tests.test_models.OtherModelTests)\", \"test_m2m_table_name_clash_database_routers_installed (invalid_models_tests.test_models.OtherModelTests)\", \"test_m2m_to_concrete_and_proxy_allowed (invalid_models_tests.test_models.OtherModelTests)\", \"test_m2m_unmanaged_shadow_models_not_checked (invalid_models_tests.test_models.OtherModelTests)\", \"test_name_beginning_with_underscore (invalid_models_tests.test_models.OtherModelTests)\", \"test_name_contains_double_underscores (invalid_models_tests.test_models.OtherModelTests)\", \"test_name_ending_with_underscore (invalid_models_tests.test_models.OtherModelTests)\", \"test_non_valid (invalid_models_tests.test_models.OtherModelTests)\", \"test_onetoone_with_explicit_parent_link_parent_model (invalid_models_tests.test_models.OtherModelTests)\", \"test_onetoone_with_parent_model (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_allows_registered_lookups (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_non_iterable (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_pointing_multiple_times_to_model_fields (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_pointing_to_foreignkey_field (invalid_models_tests.test_models.OtherModelTests)\", 
\"test_ordering_pointing_to_lookup_not_transform (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_pointing_to_missing_field (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_pointing_to_missing_foreignkey_field (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_pointing_to_missing_related_field (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_pointing_to_missing_related_model_field (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_pointing_to_non_related_field (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_pointing_to_related_model_pk (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_pointing_to_two_related_model_field (invalid_models_tests.test_models.OtherModelTests)\", \"test_ordering_with_order_with_respect_to (invalid_models_tests.test_models.OtherModelTests)\", \"test_property_and_related_field_accessor_clash (invalid_models_tests.test_models.OtherModelTests)\", \"test_single_primary_key (invalid_models_tests.test_models.OtherModelTests)\", \"test_swappable_missing_app (invalid_models_tests.test_models.OtherModelTests)\", \"test_swappable_missing_app_name (invalid_models_tests.test_models.OtherModelTests)\", \"test_two_m2m_through_same_model_with_different_through_fields (invalid_models_tests.test_models.OtherModelTests)\", \"test_two_m2m_through_same_relationship (invalid_models_tests.test_models.OtherModelTests)\", \"test_unique_primary_key (invalid_models_tests.test_models.OtherModelTests)\"]", + "expected_spans": { + "django/db/models/base.py": [ + "Model", + "Model._check_constraints" + ] + }, + "test_file_spans": { + "tests/invalid_models_tests/test_models.py": [] + }, + "resolved_by": [ + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/base.py": [ + "Model", + "Model._check_constraints" + ], + "django/db/models/constraints.py": [ + "UniqueConstraint.__init__" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model", + "Model._check_constraints" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pytest-dev__pytest-7168", + "repo": "pytest-dev/pytest", + "base_commit": "4787fd64a4ca0dba5528b5651bddd254102fe9f3", + "problem_statement": "INTERNALERROR when exception in __repr__\nMinimal code to reproduce the issue: \r\n```python\r\nclass SomeClass:\r\n def __getattribute__(self, attr):\r\n raise\r\n def __repr__(self):\r\n raise\r\ndef test():\r\n SomeClass().attr\r\n```\r\nSession traceback:\r\n```\r\n============================= test session starts ==============================\r\nplatform darwin -- Python 3.8.1, pytest-5.4.1, py-1.8.1, pluggy-0.13.1 -- /usr/local/opt/python@3.8/bin/python3.8\r\ncachedir: .pytest_cache\r\nrootdir: ******\r\nplugins: asyncio-0.10.0, mock-3.0.0, cov-2.8.1\r\ncollecting ... 
collected 1 item\r\n\r\ntest_pytest.py::test \r\nINTERNALERROR> Traceback (most recent call last):\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/main.py\", line 191, in wrap_session\r\nINTERNALERROR> session.exitstatus = doit(config, session) or 0\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/main.py\", line 247, in _main\r\nINTERNALERROR> config.hook.pytest_runtestloop(session=session)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/hooks.py\", line 286, in __call__\r\nINTERNALERROR> return self._hookexec(self, self.get_hookimpls(), kwargs)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/manager.py\", line 93, in _hookexec\r\nINTERNALERROR> return self._inner_hookexec(hook, methods, kwargs)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/manager.py\", line 84, in \r\nINTERNALERROR> self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall(\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/callers.py\", line 208, in _multicall\r\nINTERNALERROR> return outcome.get_result()\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/callers.py\", line 80, in get_result\r\nINTERNALERROR> raise ex[1].with_traceback(ex[2])\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/callers.py\", line 187, in _multicall\r\nINTERNALERROR> res = hook_impl.function(*args)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/main.py\", line 272, in pytest_runtestloop\r\nINTERNALERROR> item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/hooks.py\", line 286, in __call__\r\nINTERNALERROR> return self._hookexec(self, self.get_hookimpls(), kwargs)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/manager.py\", line 93, in _hookexec\r\nINTERNALERROR> return self._inner_hookexec(hook, methods, kwargs)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/manager.py\", line 84, in \r\nINTERNALERROR> self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall(\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/callers.py\", line 208, in _multicall\r\nINTERNALERROR> return outcome.get_result()\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/callers.py\", line 80, in get_result\r\nINTERNALERROR> raise ex[1].with_traceback(ex[2])\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/callers.py\", line 187, in _multicall\r\nINTERNALERROR> res = hook_impl.function(*args)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/runner.py\", line 85, in pytest_runtest_protocol\r\nINTERNALERROR> runtestprotocol(item, nextitem=nextitem)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/runner.py\", line 100, in runtestprotocol\r\nINTERNALERROR> reports.append(call_and_report(item, \"call\", log))\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/runner.py\", line 188, in call_and_report\r\nINTERNALERROR> report = hook.pytest_runtest_makereport(item=item, call=call)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/hooks.py\", line 286, in __call__\r\nINTERNALERROR> return self._hookexec(self, self.get_hookimpls(), kwargs)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/manager.py\", line 93, in _hookexec\r\nINTERNALERROR> return 
self._inner_hookexec(hook, methods, kwargs)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/manager.py\", line 84, in \r\nINTERNALERROR> self._inner_hookexec = lambda hook, methods, kwargs: hook.multicall(\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/callers.py\", line 203, in _multicall\r\nINTERNALERROR> gen.send(outcome)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/skipping.py\", line 129, in pytest_runtest_makereport\r\nINTERNALERROR> rep = outcome.get_result()\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/callers.py\", line 80, in get_result\r\nINTERNALERROR> raise ex[1].with_traceback(ex[2])\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/pluggy/callers.py\", line 187, in _multicall\r\nINTERNALERROR> res = hook_impl.function(*args)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/runner.py\", line 260, in pytest_runtest_makereport\r\nINTERNALERROR> return TestReport.from_item_and_call(item, call)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/reports.py\", line 294, in from_item_and_call\r\nINTERNALERROR> longrepr = item.repr_failure(excinfo)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/python.py\", line 1513, in repr_failure\r\nINTERNALERROR> return self._repr_failure_py(excinfo, style=style)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/nodes.py\", line 355, in _repr_failure_py\r\nINTERNALERROR> return excinfo.getrepr(\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_code/code.py\", line 634, in getrepr\r\nINTERNALERROR> return fmt.repr_excinfo(self)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_code/code.py\", line 879, in repr_excinfo\r\nINTERNALERROR> reprtraceback = self.repr_traceback(excinfo_)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_code/code.py\", line 823, in repr_traceback\r\nINTERNALERROR> reprentry = self.repr_traceback_entry(entry, einfo)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_code/code.py\", line 784, in repr_traceback_entry\r\nINTERNALERROR> reprargs = self.repr_args(entry) if not short else None\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_code/code.py\", line 693, in repr_args\r\nINTERNALERROR> args.append((argname, saferepr(argvalue)))\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_io/saferepr.py\", line 82, in saferepr\r\nINTERNALERROR> return SafeRepr(maxsize).repr(obj)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_io/saferepr.py\", line 51, in repr\r\nINTERNALERROR> s = _format_repr_exception(exc, x)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_io/saferepr.py\", line 23, in _format_repr_exception\r\nINTERNALERROR> exc_info, obj.__class__.__name__, id(obj)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_io/saferepr.py\", line 47, in repr\r\nINTERNALERROR> s = super().repr(x)\r\nINTERNALERROR> File \"/usr/local/Cellar/python@3.8/3.8.1/Frameworks/Python.framework/Versions/3.8/lib/python3.8/reprlib.py\", line 52, in repr\r\nINTERNALERROR> return self.repr1(x, self.maxlevel)\r\nINTERNALERROR> File \"/usr/local/Cellar/python@3.8/3.8.1/Frameworks/Python.framework/Versions/3.8/lib/python3.8/reprlib.py\", line 62, in repr1\r\nINTERNALERROR> return self.repr_instance(x, level)\r\nINTERNALERROR> File 
\"/usr/local/lib/python3.8/site-packages/_pytest/_io/saferepr.py\", line 60, in repr_instance\r\nINTERNALERROR> s = _format_repr_exception(exc, x)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_io/saferepr.py\", line 23, in _format_repr_exception\r\nINTERNALERROR> exc_info, obj.__class__.__name__, id(obj)\r\nINTERNALERROR> File \"/usr/local/lib/python3.8/site-packages/_pytest/_io/saferepr.py\", line 56, in repr_instance\r\nINTERNALERROR> s = repr(x)\r\nINTERNALERROR> File \"/Users/stiflou/Documents/projets/apischema/tests/test_pytest.py\", line 6, in __repr__\r\nINTERNALERROR> raise\r\nINTERNALERROR> RuntimeError: No active exception to reraise\r\n\r\n============================ no tests ran in 0.09s ============================\r\n```\n", + "golden_patch": "diff --git a/src/_pytest/_io/saferepr.py b/src/_pytest/_io/saferepr.py\n--- a/src/_pytest/_io/saferepr.py\n+++ b/src/_pytest/_io/saferepr.py\n@@ -20,7 +20,7 @@ def _format_repr_exception(exc: BaseException, obj: Any) -> str:\n except BaseException as exc:\n exc_info = \"unpresentable exception ({})\".format(_try_repr_or_str(exc))\n return \"<[{} raised in repr()] {} object at 0x{:x}>\".format(\n- exc_info, obj.__class__.__name__, id(obj)\n+ exc_info, type(obj).__name__, id(obj)\n )\n \n \n", + "test_patch": "diff --git a/testing/io/test_saferepr.py b/testing/io/test_saferepr.py\n--- a/testing/io/test_saferepr.py\n+++ b/testing/io/test_saferepr.py\n@@ -154,3 +154,20 @@ def test_pformat_dispatch():\n assert _pformat_dispatch(\"a\") == \"'a'\"\n assert _pformat_dispatch(\"a\" * 10, width=5) == \"'aaaaaaaaaa'\"\n assert _pformat_dispatch(\"foo bar\", width=5) == \"('foo '\\n 'bar')\"\n+\n+\n+def test_broken_getattribute():\n+ \"\"\"saferepr() can create proper representations of classes with\n+ broken __getattribute__ (#7145)\n+ \"\"\"\n+\n+ class SomeClass:\n+ def __getattribute__(self, attr):\n+ raise RuntimeError\n+\n+ def __repr__(self):\n+ raise RuntimeError\n+\n+ assert saferepr(SomeClass()).startswith(\n+ \"<[RuntimeError() raised in repr()] SomeClass object at 0x\"\n+ )\n", + "fail_to_pass": "[\"testing/io/test_saferepr.py::test_simple_repr\", \"testing/io/test_saferepr.py::test_maxsize\", \"testing/io/test_saferepr.py::test_maxsize_error_on_instance\", \"testing/io/test_saferepr.py::test_exceptions\", \"testing/io/test_saferepr.py::test_baseexception\", \"testing/io/test_saferepr.py::test_buggy_builtin_repr\", \"testing/io/test_saferepr.py::test_big_repr\", \"testing/io/test_saferepr.py::test_repr_on_newstyle\", \"testing/io/test_saferepr.py::test_unicode\", \"testing/io/test_saferepr.py::test_pformat_dispatch\", \"testing/io/test_saferepr.py::test_broken_getattribute\"]", + "pass_to_pass": "[]", + "expected_spans": { + "src/_pytest/_io/saferepr.py": [ + "_format_repr_exception" + ] + }, + "test_file_spans": { + "testing/io/test_saferepr.py": [] + }, + "resolved_by": [ + { + "name": "20240828_autose_mixed", + "updated_spans": { + "src/_pytest/_io/saferepr.py": [ + "_format_repr_exception" + ] + }, + "alternative_spans": { + "src/_pytest/_io/saferepr.py": [ + "_format_repr_exception" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "src/_pytest/_io/saferepr.py": [ + "_format_repr_exception", + "_ellipsize", + "SafeRepr.__init__", + "SafeRepr.repr", + "SafeRepr.repr_instance", + "safeformat", + "saferepr", + "AlwaysDispatchingPrettyPrinter._format", + "_pformat_dispatch" + ] + }, + "alternative_spans": { + "src/_pytest/_io/saferepr.py": [ + "_format_repr_exception", + 
"_ellipsize", + "SafeRepr.__init__", + "SafeRepr.repr", + "SafeRepr.repr_instance", + "safeformat", + "saferepr", + "AlwaysDispatchingPrettyPrinter._format", + "_pformat_dispatch" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "src/_pytest/_io/saferepr.py": [ + "imports", + "_format_repr_exception" + ] + }, + "alternative_spans": { + "src/_pytest/_io/saferepr.py": [ + "imports", + "_format_repr_exception" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12908", + "repo": "django/django", + "base_commit": "49ae7ce50a874f8a04cd910882fb9571ff3a0d7a", + "problem_statement": "Union queryset should raise on distinct().\nDescription\n\t \n\t\t(last modified by Sielc Technologies)\n\t \nAfter using\n.annotate() on 2 different querysets\nand then .union()\n.distinct() will not affect the queryset\n\tdef setUp(self) -> None:\n\t\tuser = self.get_or_create_admin_user()\n\t\tSample.h.create(user, name=\"Sam1\")\n\t\tSample.h.create(user, name=\"Sam2 acid\")\n\t\tSample.h.create(user, name=\"Sam3\")\n\t\tSample.h.create(user, name=\"Sam4 acid\")\n\t\tSample.h.create(user, name=\"Dub\")\n\t\tSample.h.create(user, name=\"Dub\")\n\t\tSample.h.create(user, name=\"Dub\")\n\t\tself.user = user\n\tdef test_union_annotated_diff_distinct(self):\n\t\tqs = Sample.objects.filter(user=self.user)\n\t\tqs1 = qs.filter(name='Dub').annotate(rank=Value(0, IntegerField()))\n\t\tqs2 = qs.filter(name='Sam1').annotate(rank=Value(1, IntegerField()))\n\t\tqs = qs1.union(qs2)\n\t\tqs = qs.order_by('name').distinct('name') # THIS DISTINCT DOESN'T WORK\n\t\tself.assertEqual(qs.count(), 2)\nexpected to get wrapped union\n\tSELECT DISTINCT ON (siebox_sample.name) * FROM (SELECT ... UNION SELECT ...) AS siebox_sample\n", + "golden_patch": "diff --git a/django/db/models/query.py b/django/db/models/query.py\n--- a/django/db/models/query.py\n+++ b/django/db/models/query.py\n@@ -1138,6 +1138,7 @@ def distinct(self, *field_names):\n \"\"\"\n Return a new QuerySet instance that will select only distinct results.\n \"\"\"\n+ self._not_support_combined_queries('distinct')\n assert not self.query.is_sliced, \\\n \"Cannot create distinct fields once a slice has been taken.\"\n obj = self._chain()\n", + "test_patch": "diff --git a/tests/queries/test_qs_combinators.py b/tests/queries/test_qs_combinators.py\n--- a/tests/queries/test_qs_combinators.py\n+++ b/tests/queries/test_qs_combinators.py\n@@ -272,6 +272,7 @@ def test_unsupported_operations_on_combined_qs(self):\n 'annotate',\n 'defer',\n 'delete',\n+ 'distinct',\n 'exclude',\n 'extra',\n 'filter',\n", + "fail_to_pass": "[\"test_unsupported_operations_on_combined_qs (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_unsupported_ordering_slicing_raises_db_error (queries.test_qs_combinators.QuerySetSetOperationTests)\"]", + "pass_to_pass": "[\"test_combining_multiple_models (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_count_difference (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_count_intersection (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_count_union (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_count_union_empty_result (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_difference_with_empty_qs (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_difference_with_values (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_intersection_with_empty_qs 
(queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_intersection_with_values (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_limits (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_order_by_same_type (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_order_raises_on_non_selected_column (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_ordering (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_ordering_by_f_expression (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_qs_with_subcompound_qs (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_simple_difference (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_simple_intersection (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_simple_union (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_distinct (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_empty_qs (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_extra_and_values_list (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_two_annotated_values_list (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_values (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_values_list_and_order (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_values_list_on_annotated_and_unannotated (queries.test_qs_combinators.QuerySetSetOperationTests)\"]", + "expected_spans": { + "django/db/models/query.py": [ + "QuerySet.distinct" + ] + }, + "test_file_spans": { + "tests/queries/test_qs_combinators.py": [ + "QuerySetSetOperationTests.test_unsupported_operations_on_combined_qs" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12915", + "repo": "django/django", + "base_commit": "4652f1f0aa459a7b980441d629648707c32e36bf", + "problem_statement": "Add get_response_async for ASGIStaticFilesHandler\nDescription\n\t\nIt looks like the StaticFilesHandlerMixin is missing the the async response function.\nWithout this, when trying to use the ASGIStaticFilesHandler, this is the traceback:\nException inside application: 'NoneType' object is not callable\nTraceback (most recent call last):\n File \".../lib/python3.7/site-packages/daphne/cli.py\", line 30, in asgi\n\tawait self.app(scope, receive, send)\n File \".../src/django/django/contrib/staticfiles/handlers.py\", line 86, in __call__\n\treturn await super().__call__(scope, receive, send)\n File \".../src/django/django/core/handlers/asgi.py\", line 161, in __call__\n\tresponse = await self.get_response_async(request)\n File \".../src/django/django/core/handlers/base.py\", line 148, in get_response_async\n\tresponse = await self._middleware_chain(request)\nTypeError: 'NoneType' object is not callable\n", + "golden_patch": "diff --git a/django/contrib/staticfiles/handlers.py b/django/contrib/staticfiles/handlers.py\n--- a/django/contrib/staticfiles/handlers.py\n+++ b/django/contrib/staticfiles/handlers.py\n@@ -1,6 +1,8 @@\n from urllib.parse import urlparse\n from urllib.request import url2pathname\n \n+from asgiref.sync import sync_to_async\n+\n from django.conf import settings\n from django.contrib.staticfiles import utils\n from django.contrib.staticfiles.views import serve\n@@ -52,6 +54,12 @@ def get_response(self, request):\n except Http404 as e:\n return response_for_exception(request, e)\n \n+ 
async def get_response_async(self, request):\n+ try:\n+ return await sync_to_async(self.serve)(request)\n+ except Http404 as e:\n+ return await sync_to_async(response_for_exception)(request, e)\n+\n \n class StaticFilesHandler(StaticFilesHandlerMixin, WSGIHandler):\n \"\"\"\n", + "test_patch": "diff --git a/tests/asgi/project/static/file.txt b/tests/asgi/project/static/file.txt\nnew file mode 100644\n--- /dev/null\n+++ b/tests/asgi/project/static/file.txt\n@@ -0,0 +1 @@\n+test\ndiff --git a/tests/asgi/tests.py b/tests/asgi/tests.py\n--- a/tests/asgi/tests.py\n+++ b/tests/asgi/tests.py\n@@ -1,18 +1,25 @@\n import asyncio\n import sys\n import threading\n+from pathlib import Path\n from unittest import skipIf\n \n from asgiref.sync import SyncToAsync\n from asgiref.testing import ApplicationCommunicator\n \n+from django.contrib.staticfiles.handlers import ASGIStaticFilesHandler\n from django.core.asgi import get_asgi_application\n from django.core.signals import request_finished, request_started\n from django.db import close_old_connections\n-from django.test import AsyncRequestFactory, SimpleTestCase, override_settings\n+from django.test import (\n+ AsyncRequestFactory, SimpleTestCase, modify_settings, override_settings,\n+)\n+from django.utils.http import http_date\n \n from .urls import test_filename\n \n+TEST_STATIC_ROOT = Path(__file__).parent / 'project' / 'static'\n+\n \n @skipIf(sys.platform == 'win32' and (3, 8, 0) < sys.version_info < (3, 8, 1), 'https://bugs.python.org/issue38563')\n @override_settings(ROOT_URLCONF='asgi.urls')\n@@ -79,6 +86,45 @@ async def test_file_response(self):\n # Allow response.close() to finish.\n await communicator.wait()\n \n+ @modify_settings(INSTALLED_APPS={'append': 'django.contrib.staticfiles'})\n+ @override_settings(\n+ STATIC_URL='/static/',\n+ STATIC_ROOT=TEST_STATIC_ROOT,\n+ STATICFILES_DIRS=[TEST_STATIC_ROOT],\n+ STATICFILES_FINDERS=[\n+ 'django.contrib.staticfiles.finders.FileSystemFinder',\n+ ],\n+ )\n+ async def test_static_file_response(self):\n+ application = ASGIStaticFilesHandler(get_asgi_application())\n+ # Construct HTTP request.\n+ scope = self.async_request_factory._base_scope(path='/static/file.txt')\n+ communicator = ApplicationCommunicator(application, scope)\n+ await communicator.send_input({'type': 'http.request'})\n+ # Get the file content.\n+ file_path = TEST_STATIC_ROOT / 'file.txt'\n+ with open(file_path, 'rb') as test_file:\n+ test_file_contents = test_file.read()\n+ # Read the response.\n+ stat = file_path.stat()\n+ response_start = await communicator.receive_output()\n+ self.assertEqual(response_start['type'], 'http.response.start')\n+ self.assertEqual(response_start['status'], 200)\n+ self.assertEqual(\n+ set(response_start['headers']),\n+ {\n+ (b'Content-Length', str(len(test_file_contents)).encode('ascii')),\n+ (b'Content-Type', b'text/plain'),\n+ (b'Content-Disposition', b'inline; filename=\"file.txt\"'),\n+ (b'Last-Modified', http_date(stat.st_mtime).encode('ascii')),\n+ },\n+ )\n+ response_body = await communicator.receive_output()\n+ self.assertEqual(response_body['type'], 'http.response.body')\n+ self.assertEqual(response_body['body'], test_file_contents)\n+ # Allow response.close() to finish.\n+ await communicator.wait()\n+\n async def test_headers(self):\n application = get_asgi_application()\n communicator = ApplicationCommunicator(\ndiff --git a/tests/staticfiles_tests/test_handlers.py b/tests/staticfiles_tests/test_handlers.py\nnew file mode 100644\n--- /dev/null\n+++ 
b/tests/staticfiles_tests/test_handlers.py\n@@ -0,0 +1,22 @@\n+from django.contrib.staticfiles.handlers import ASGIStaticFilesHandler\n+from django.core.handlers.asgi import ASGIHandler\n+from django.test import AsyncRequestFactory\n+\n+from .cases import StaticFilesTestCase\n+\n+\n+class TestASGIStaticFilesHandler(StaticFilesTestCase):\n+ async_request_factory = AsyncRequestFactory()\n+\n+ async def test_get_async_response(self):\n+ request = self.async_request_factory.get('/static/test/file.txt')\n+ handler = ASGIStaticFilesHandler(ASGIHandler())\n+ response = await handler.get_response_async(request)\n+ response.close()\n+ self.assertEqual(response.status_code, 200)\n+\n+ async def test_get_async_response_not_found(self):\n+ request = self.async_request_factory.get('/static/test/not-found.txt')\n+ handler = ASGIStaticFilesHandler(ASGIHandler())\n+ response = await handler.get_response_async(request)\n+ self.assertEqual(response.status_code, 404)\n", + "fail_to_pass": "[\"test_get_async_response (staticfiles_tests.test_handlers.TestASGIStaticFilesHandler)\", \"test_get_async_response_not_found (staticfiles_tests.test_handlers.TestASGIStaticFilesHandler)\", \"test_static_file_response (asgi.tests.ASGITest)\"]", + "pass_to_pass": "[\"test_disconnect (asgi.tests.ASGITest)\", \"test_file_response (asgi.tests.ASGITest)\", \"test_get_asgi_application (asgi.tests.ASGITest)\", \"test_get_query_string (asgi.tests.ASGITest)\", \"test_headers (asgi.tests.ASGITest)\", \"test_non_unicode_query_string (asgi.tests.ASGITest)\", \"test_request_lifecycle_signals_dispatched_with_thread_sensitive (asgi.tests.ASGITest)\", \"test_wrong_connection_type (asgi.tests.ASGITest)\"]", + "expected_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "StaticFilesHandlerMixin" + ] + }, + "test_file_spans": { + "tests/asgi/tests.py": [ + "imports", + "ASGITest.test_headers" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "ASGIStaticFilesHandler.__call__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "StaticFilesHandlerMixin.get_base_url", + "ASGIStaticFilesHandler.__call__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "StaticFilesHandlerMixin.get_base_url", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + 
"django/contrib/staticfiles/handlers.py": [ + "ASGIStaticFilesHandler.__call__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.load_middleware", + "StaticFilesHandlerMixin", + "ASGIStaticFilesHandler.__init__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.load_middleware", + "StaticFilesHandlerMixin", + "ASGIStaticFilesHandler.__init__" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "ASGIStaticFilesHandler.__call__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "StaticFilesHandlerMixin.get_response" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "StaticFilesHandlerMixin.get_response" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "StaticFilesHandlerMixin" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "StaticFilesHandlerMixin" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "StaticFilesHandler", + "StaticFilesHandler.__init__" + ] + }, + "alternative_spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "StaticFilesHandler", + "StaticFilesHandler.__init__" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240630_agentless_gpt4o", + "spans": { + "django/contrib/staticfiles/handlers.py": 
[ + "StaticFilesHandlerMixin.get_response", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "run_name": "20240615_appmap-navie_gpt4o", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "StaticFilesHandlerMixin.get_base_url", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin" + ] + } + }, + { + "run_name": "20240820_honeycomb", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports" + ] + } + }, + { + "run_name": "20240808_RepoGraph_gpt4o", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "run_name": "20240604_CodeR", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "ASGIStaticFilesHandler", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.load_middleware", + "StaticFilesHandlerMixin", + "ASGIStaticFilesHandler.__init__" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "ASGIStaticFilesHandler.__call__" + ] + } + }, + { + "run_name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "imports", + "StaticFilesHandlerMixin.get_response" + ] + } + }, + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin" + ] + } + }, + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "django/contrib/staticfiles/handlers.py": [ + "StaticFilesHandlerMixin.get_response", + "StaticFilesHandler", + "StaticFilesHandler.__init__" + ] + } + } + ] + }, + { + "instance_id": "pytest-dev__pytest-7220", + "repo": "pytest-dev/pytest", + "base_commit": "56bf819c2f4eaf8b36bd8c42c06bb59d5a3bfc0f", + "problem_statement": "Wrong path to test file when directory changed in fixture\nFiles are shown as relative to new directory when working directory is changed in a fixture. This makes it impossible to jump to the error as the editor is unaware of the directory change. 
The displayed directory should stay relative to the original directory.\r\n\r\ntest_path_error.py:\r\n```python\r\nimport os\r\nimport errno\r\nimport shutil\r\n\r\nimport pytest\r\n\r\n\r\n@pytest.fixture\r\ndef private_dir(): # or (monkeypatch)\r\n out_dir = 'ddd'\r\n\r\n try:\r\n shutil.rmtree(out_dir)\r\n except OSError as ex:\r\n if ex.errno != errno.ENOENT:\r\n raise\r\n os.mkdir(out_dir)\r\n\r\n old_dir = os.getcwd()\r\n os.chdir(out_dir)\r\n yield out_dir\r\n os.chdir(old_dir)\r\n\r\n # Same issue if using:\r\n # monkeypatch.chdir(out_dir)\r\n\r\n\r\ndef test_show_wrong_path(private_dir):\r\n assert False\r\n```\r\n\r\n```diff\r\n+ Expected: test_path_error.py:29: AssertionError\r\n- Displayed: ../test_path_error.py:29: AssertionError\r\n```\r\n\r\nThe full output is:\r\n```\r\n-*- mode: compilation; default-directory: \"~/src/pytest_path_error/\" -*-\r\nCompilation started at Fri Jan 10 00:05:52\r\n\r\nnox\r\nnox > Running session test\r\nnox > Creating virtual environment (virtualenv) using python3.7 in .nox/test\r\nnox > pip install pytest>=5.3\r\nnox > pip freeze\r\nattrs==19.3.0\r\nimportlib-metadata==1.3.0\r\nmore-itertools==8.0.2\r\npackaging==20.0\r\npluggy==0.13.1\r\npy==1.8.1\r\npyparsing==2.4.6\r\npytest==5.3.2\r\nsix==1.13.0\r\nwcwidth==0.1.8\r\nzipp==0.6.0\r\nnox > pytest \r\n================================= test session starts =================================\r\nplatform linux -- Python 3.7.5, pytest-5.3.2, py-1.8.1, pluggy-0.13.1\r\nrootdir: /home/lhn/src/pytest_path_error\r\ncollected 1 item \r\n\r\ntest_path_error.py F [100%]\r\n\r\n====================================== FAILURES =======================================\r\n________________________________ test_show_wrong_path _________________________________\r\n\r\nprivate_dir = 'ddd'\r\n\r\n def test_show_wrong_path(private_dir):\r\n> assert False\r\nE assert False\r\n\r\n../test_path_error.py:29: AssertionError\r\n================================== 1 failed in 0.03s ==================================\r\nnox > Command pytest failed with exit code 1\r\nnox > Session test failed.\r\n\r\nCompilation exited abnormally with code 1 at Fri Jan 10 00:06:01\r\n```\r\n\r\nnoxfile.py:\r\n```python\r\nimport nox\r\n\r\n@nox.session(python='3.7')\r\ndef test(session):\r\n session.install('pytest>=5.3')\r\n session.run('pip', 'freeze')\r\n session.run('pytest')\r\n```\n", + "golden_patch": "diff --git a/src/_pytest/nodes.py b/src/_pytest/nodes.py\n--- a/src/_pytest/nodes.py\n+++ b/src/_pytest/nodes.py\n@@ -29,6 +29,7 @@\n from _pytest.mark.structures import MarkDecorator\n from _pytest.mark.structures import NodeKeywords\n from _pytest.outcomes import fail\n+from _pytest.pathlib import Path\n from _pytest.store import Store\n \n if TYPE_CHECKING:\n@@ -361,9 +362,14 @@ def _repr_failure_py(\n else:\n truncate_locals = True\n \n+ # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False.\n+ # It is possible for a fixture/test to change the CWD while this code runs, which\n+ # would then result in the user seeing confusing paths in the failure message.\n+ # To fix this, if the CWD changed, always display the full absolute path.\n+ # It will be better to just always display paths relative to invocation_dir, but\n+ # this requires a lot of plumbing (#6428).\n try:\n- os.getcwd()\n- abspath = False\n+ abspath = Path(os.getcwd()) != Path(self.config.invocation_dir)\n except OSError:\n abspath = True\n \n", + "test_patch": "diff --git a/testing/test_nodes.py b/testing/test_nodes.py\n--- a/testing/test_nodes.py\n+++ 
b/testing/test_nodes.py\n@@ -58,3 +58,30 @@ class FakeSession:\n \n outside = py.path.local(\"/outside\")\n assert nodes._check_initialpaths_for_relpath(FakeSession, outside) is None\n+\n+\n+def test_failure_with_changed_cwd(testdir):\n+ \"\"\"\n+ Test failure lines should use absolute paths if cwd has changed since\n+ invocation, so the path is correct (#6428).\n+ \"\"\"\n+ p = testdir.makepyfile(\n+ \"\"\"\n+ import os\n+ import pytest\n+\n+ @pytest.fixture\n+ def private_dir():\n+ out_dir = 'ddd'\n+ os.mkdir(out_dir)\n+ old_dir = os.getcwd()\n+ os.chdir(out_dir)\n+ yield out_dir\n+ os.chdir(old_dir)\n+\n+ def test_show_wrong_path(private_dir):\n+ assert False\n+ \"\"\"\n+ )\n+ result = testdir.runpytest()\n+ result.stdout.fnmatch_lines([str(p) + \":*: AssertionError\", \"*1 failed in *\"])\n", + "fail_to_pass": "[\"testing/test_nodes.py::test_failure_with_changed_cwd\"]", + "pass_to_pass": "[\"testing/test_nodes.py::test_ischildnode[--True]\", \"testing/test_nodes.py::test_ischildnode[-foo-True]\", \"testing/test_nodes.py::test_ischildnode[-foo/bar-True]\", \"testing/test_nodes.py::test_ischildnode[-foo/bar::TestBaz-True]\", \"testing/test_nodes.py::test_ischildnode[foo-food-False]\", \"testing/test_nodes.py::test_ischildnode[foo/bar::TestBaz-foo/bar-False]\", \"testing/test_nodes.py::test_ischildnode[foo/bar::TestBaz-foo/bar::TestBop-False]\", \"testing/test_nodes.py::test_ischildnode[foo/bar-foo/bar::TestBop-True]\", \"testing/test_nodes.py::test_node_from_parent_disallowed_arguments\", \"testing/test_nodes.py::test__check_initialpaths_for_relpath\", \"testing/test_nodes.py::test_std_warn_not_pytestwarning\"]", + "expected_spans": { + "src/_pytest/nodes.py": [ + "imports", + "Node._repr_failure_py" + ] + }, + "test_file_spans": { + "testing/test_nodes.py": [] + }, + "resolved_by": [ + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "src/_pytest/nodes.py": [ + "Node._repr_failure_py" + ] + }, + "alternative_spans": { + "src/_pytest/nodes.py": [ + "Node._repr_failure_py" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "src/_pytest/nodes.py": [ + "Node._repr_failure_py" + ] + }, + "alternative_spans": { + "src/_pytest/nodes.py": [ + "Node._repr_failure_py" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "src/_pytest/nodes.py": [ + "imports", + "Node", + "Node.from_parent", + "Node._repr_failure_py", + "Node.repr_failure" + ] + }, + "alternative_spans": { + "src/_pytest/nodes.py": [ + "imports", + "Node", + "Node.from_parent", + "Node._repr_failure_py", + "Node.repr_failure" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "src/_pytest/_code/code.py": [ + "imports", + "ReprFileLocation" + ], + "src/_pytest/nodes.py": [ + "_splitnode" + ] + }, + "alternative_spans": { + "src/_pytest/nodes.py": [ + "_splitnode" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240808_RepoGraph_gpt4o", + "spans": { + "src/_pytest/nodes.py": [ + "Node._repr_failure_py" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "src/_pytest/nodes.py": [ + "Node._repr_failure_py" + ] + } + }, + { + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "src/_pytest/nodes.py": [ + "_splitnode" + ] + } + } + ] + }, + { + "instance_id": "sphinx-doc__sphinx-7686", + "repo": "sphinx-doc/sphinx", + "base_commit": "752d3285d250bbaf673cff25e83f03f247502021", + "problem_statement": "autosummary: The members variable for module template contains imported 
members\n**Describe the bug**\r\nautosummary: The members variable for module template contains imported members even if autosummary_imported_members is False.\r\n\r\n**To Reproduce**\r\n\r\n```\r\n# _templates/autosummary/module.rst\r\n{{ fullname | escape | underline }}\r\n\r\n.. automodule:: {{ fullname }}\r\n\r\n .. autosummary::\r\n {% for item in members %}\r\n {{ item }}\r\n {%- endfor %}\r\n\r\n```\r\n```\r\n# example.py\r\nimport os\r\n```\r\n```\r\n# index.rst\r\n.. autosummary::\r\n :toctree: generated\r\n\r\n example\r\n```\r\n```\r\n# conf.py\r\nautosummary_generate = True\r\nautosummary_imported_members = False\r\n```\r\n\r\nAs a result, I got following output:\r\n```\r\n# generated/example.rst\r\nexample\r\n=======\r\n\r\n.. automodule:: example\r\n\r\n .. autosummary::\r\n\r\n __builtins__\r\n __cached__\r\n __doc__\r\n __file__\r\n __loader__\r\n __name__\r\n __package__\r\n __spec__\r\n os\r\n```\r\n\r\n**Expected behavior**\r\nThe template variable `members` should not contain imported members when `autosummary_imported_members` is False.\r\n\r\n**Your project**\r\nNo\r\n\r\n**Screenshots**\r\nNo\r\n\r\n**Environment info**\r\n- OS: Mac\r\n- Python version: 3.8.2\r\n- Sphinx version: 3.1.0dev\r\n- Sphinx extensions: sphinx.ext.autosummary\r\n- Extra tools: No\r\n\r\n**Additional context**\r\nNo\r\n\n", + "golden_patch": "diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py\n--- a/sphinx/ext/autosummary/generate.py\n+++ b/sphinx/ext/autosummary/generate.py\n@@ -18,6 +18,7 @@\n \"\"\"\n \n import argparse\n+import inspect\n import locale\n import os\n import pkgutil\n@@ -176,6 +177,56 @@ def render(self, template_name: str, context: Dict) -> str:\n # -- Generating output ---------------------------------------------------------\n \n \n+class ModuleScanner:\n+ def __init__(self, app: Any, obj: Any) -> None:\n+ self.app = app\n+ self.object = obj\n+\n+ def get_object_type(self, name: str, value: Any) -> str:\n+ return get_documenter(self.app, value, self.object).objtype\n+\n+ def is_skipped(self, name: str, value: Any, objtype: str) -> bool:\n+ try:\n+ return self.app.emit_firstresult('autodoc-skip-member', objtype,\n+ name, value, False, {})\n+ except Exception as exc:\n+ logger.warning(__('autosummary: failed to determine %r to be documented, '\n+ 'the following exception was raised:\\n%s'),\n+ name, exc, type='autosummary')\n+ return False\n+\n+ def scan(self, imported_members: bool) -> List[str]:\n+ members = []\n+ for name in dir(self.object):\n+ try:\n+ value = safe_getattr(self.object, name)\n+ except AttributeError:\n+ value = None\n+\n+ objtype = self.get_object_type(name, value)\n+ if self.is_skipped(name, value, objtype):\n+ continue\n+\n+ try:\n+ if inspect.ismodule(value):\n+ imported = True\n+ elif safe_getattr(value, '__module__') != self.object.__name__:\n+ imported = True\n+ else:\n+ imported = False\n+ except AttributeError:\n+ imported = False\n+\n+ if imported_members:\n+ # list all members up\n+ members.append(name)\n+ elif imported is False:\n+ # list not-imported members up\n+ members.append(name)\n+\n+ return members\n+\n+\n def generate_autosummary_content(name: str, obj: Any, parent: Any,\n template: AutosummaryRenderer, template_name: str,\n imported_members: bool, app: Any,\n@@ -246,7 +297,8 @@ def get_modules(obj: Any) -> Tuple[List[str], List[str]]:\n ns.update(context)\n \n if doc.objtype == 'module':\n- ns['members'] = dir(obj)\n+ scanner = ModuleScanner(app, obj)\n+ ns['members'] = 
scanner.scan(imported_members)\n ns['functions'], ns['all_functions'] = \\\n get_members(obj, {'function'}, imported=imported_members)\n ns['classes'], ns['all_classes'] = \\\n", + "test_patch": "diff --git a/tests/roots/test-ext-autosummary/autosummary_dummy_module.py b/tests/roots/test-ext-autosummary/autosummary_dummy_module.py\n--- a/tests/roots/test-ext-autosummary/autosummary_dummy_module.py\n+++ b/tests/roots/test-ext-autosummary/autosummary_dummy_module.py\n@@ -1,4 +1,4 @@\n-from os import * # NOQA\n+from os import path # NOQA\n from typing import Union\n \n \n@@ -17,7 +17,23 @@ def baz(self):\n pass\n \n \n-def bar(x: Union[int, str], y: int = 1):\n+class _Baz:\n+ pass\n+\n+\n+def bar(x: Union[int, str], y: int = 1) -> None:\n+ pass\n+\n+\n+def _quux():\n+ pass\n+\n+\n+class Exc(Exception):\n+ pass\n+\n+\n+class _Exc(Exception):\n pass\n \n \ndiff --git a/tests/test_ext_autosummary.py b/tests/test_ext_autosummary.py\n--- a/tests/test_ext_autosummary.py\n+++ b/tests/test_ext_autosummary.py\n@@ -19,7 +19,10 @@\n from sphinx.ext.autosummary import (\n autosummary_table, autosummary_toc, mangle_signature, import_by_name, extract_summary\n )\n-from sphinx.ext.autosummary.generate import AutosummaryEntry, generate_autosummary_docs, main as autogen_main\n+from sphinx.ext.autosummary.generate import (\n+ AutosummaryEntry, generate_autosummary_content, generate_autosummary_docs,\n+ main as autogen_main\n+)\n from sphinx.testing.util import assert_node, etree_parse\n from sphinx.util.docutils import new_document\n from sphinx.util.osutil import cd\n@@ -189,6 +192,83 @@ def test_escaping(app, status, warning):\n assert str_content(title) == 'underscore_module_'\n \n \n+@pytest.mark.sphinx(testroot='ext-autosummary')\n+def test_autosummary_generate_content_for_module(app):\n+ import autosummary_dummy_module\n+ template = Mock()\n+\n+ generate_autosummary_content('autosummary_dummy_module', autosummary_dummy_module, None,\n+ template, None, False, app, False, {})\n+ assert template.render.call_args[0][0] == 'module'\n+\n+ context = template.render.call_args[0][1]\n+ assert context['members'] == ['Exc', 'Foo', '_Baz', '_Exc', '__builtins__',\n+ '__cached__', '__doc__', '__file__', '__name__',\n+ '__package__', '_quux', 'bar', 'qux']\n+ assert context['functions'] == ['bar']\n+ assert context['all_functions'] == ['_quux', 'bar']\n+ assert context['classes'] == ['Foo']\n+ assert context['all_classes'] == ['Foo', '_Baz']\n+ assert context['exceptions'] == ['Exc']\n+ assert context['all_exceptions'] == ['Exc', '_Exc']\n+ assert context['attributes'] == ['qux']\n+ assert context['all_attributes'] == ['qux']\n+ assert context['fullname'] == 'autosummary_dummy_module'\n+ assert context['module'] == 'autosummary_dummy_module'\n+ assert context['objname'] == ''\n+ assert context['name'] == ''\n+ assert context['objtype'] == 'module'\n+\n+\n+@pytest.mark.sphinx(testroot='ext-autosummary')\n+def test_autosummary_generate_content_for_module_skipped(app):\n+ import autosummary_dummy_module\n+ template = Mock()\n+\n+ def skip_member(app, what, name, obj, skip, options):\n+ if name in ('Foo', 'bar', 'Exc'):\n+ return True\n+\n+ app.connect('autodoc-skip-member', skip_member)\n+ generate_autosummary_content('autosummary_dummy_module', autosummary_dummy_module, None,\n+ template, None, False, app, False, {})\n+ context = template.render.call_args[0][1]\n+ assert context['members'] == ['_Baz', '_Exc', '__builtins__', '__cached__', '__doc__',\n+ '__file__', '__name__', '__package__', '_quux', 'qux']\n+ assert 
context['functions'] == []\n+ assert context['classes'] == []\n+ assert context['exceptions'] == []\n+\n+\n+@pytest.mark.sphinx(testroot='ext-autosummary')\n+def test_autosummary_generate_content_for_module_imported_members(app):\n+ import autosummary_dummy_module\n+ template = Mock()\n+\n+ generate_autosummary_content('autosummary_dummy_module', autosummary_dummy_module, None,\n+ template, None, True, app, False, {})\n+ assert template.render.call_args[0][0] == 'module'\n+\n+ context = template.render.call_args[0][1]\n+ assert context['members'] == ['Exc', 'Foo', 'Union', '_Baz', '_Exc', '__builtins__',\n+ '__cached__', '__doc__', '__file__', '__loader__',\n+ '__name__', '__package__', '__spec__', '_quux',\n+ 'bar', 'path', 'qux']\n+ assert context['functions'] == ['bar']\n+ assert context['all_functions'] == ['_quux', 'bar']\n+ assert context['classes'] == ['Foo']\n+ assert context['all_classes'] == ['Foo', '_Baz']\n+ assert context['exceptions'] == ['Exc']\n+ assert context['all_exceptions'] == ['Exc', '_Exc']\n+ assert context['attributes'] == ['qux']\n+ assert context['all_attributes'] == ['qux']\n+ assert context['fullname'] == 'autosummary_dummy_module'\n+ assert context['module'] == 'autosummary_dummy_module'\n+ assert context['objname'] == ''\n+ assert context['name'] == ''\n+ assert context['objtype'] == 'module'\n+\n+\n @pytest.mark.sphinx('dummy', testroot='ext-autosummary')\n def test_autosummary_generate(app, status, warning):\n app.builder.build_all()\n", + "fail_to_pass": "[\"tests/test_ext_autosummary.py::test_autosummary_generate_content_for_module\", \"tests/test_ext_autosummary.py::test_autosummary_generate_content_for_module_skipped\"]", + "pass_to_pass": "[\"tests/test_ext_autosummary.py::test_mangle_signature\", \"tests/test_ext_autosummary.py::test_escaping\", \"tests/test_ext_autosummary.py::test_autosummary_generate\", \"tests/test_ext_autosummary.py::test_autosummary_generate_overwrite1\", \"tests/test_ext_autosummary.py::test_autosummary_generate_overwrite2\", \"tests/test_ext_autosummary.py::test_autosummary_recursive\", \"tests/test_ext_autosummary.py::test_autosummary_latex_table_colspec\", \"tests/test_ext_autosummary.py::test_import_by_name\", \"tests/test_ext_autosummary.py::test_autosummary_imported_members\", \"tests/test_ext_autosummary.py::test_generate_autosummary_docs_property\", \"tests/test_ext_autosummary.py::test_autosummary_skip_member\", \"tests/test_ext_autosummary.py::test_autosummary_template\", \"tests/test_ext_autosummary.py::test_empty_autosummary_generate\", \"tests/test_ext_autosummary.py::test_invalid_autosummary_generate\", \"tests/test_ext_autosummary.py::test_autogen\"]", + "expected_spans": { + "sphinx/ext/autosummary/generate.py": [ + "imports", + "generate_autosummary_content" + ] + }, + "test_file_spans": { + "tests/roots/test-ext-autosummary/autosummary_dummy_module.py": [ + "imports", + "bar" + ], + "tests/test_ext_autosummary.py": [ + "imports", + "test_autosummary_generate" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "pydata__xarray-4094", + "repo": "pydata/xarray", + "base_commit": "a64cf2d5476e7bbda099b34c40b7be1880dbd39a", + "problem_statement": "to_unstacked_dataset broken for single-dim variables\n\r\n\r\n\r\n#### MCVE Code Sample\r\n\r\n```python\r\narr = xr.DataArray(\r\n np.arange(3),\r\n coords=[(\"x\", [0, 1, 2])],\r\n )\r\ndata = xr.Dataset({\"a\": arr, \"b\": arr})\r\nstacked = data.to_stacked_array('y', sample_dims=['x'])\r\nunstacked = stacked.to_unstacked_dataset('y')\r\n# 
MergeError: conflicting values for variable 'y' on objects to be combined. You can skip this check by specifying compat='override'.\r\n```\r\n\r\n#### Expected Output\r\nA working roundtrip.\r\n\r\n#### Problem Description\r\nI need to stack a bunch of variables and later unstack them again, however this doesn't work if the variables only have a single dimension.\r\n\r\n#### Versions\r\n\r\n
Output of xr.show_versions()\r\n\r\nINSTALLED VERSIONS\r\n------------------\r\ncommit: None\r\npython: 3.7.3 (default, Mar 27 2019, 22:11:17) \r\n[GCC 7.3.0]\r\npython-bits: 64\r\nOS: Linux\r\nOS-release: 4.15.0-96-generic\r\nmachine: x86_64\r\nprocessor: x86_64\r\nbyteorder: little\r\nLC_ALL: None\r\nLANG: en_GB.UTF-8\r\nLOCALE: en_GB.UTF-8\r\nlibhdf5: 1.10.4\r\nlibnetcdf: 4.6.2\r\n\r\nxarray: 0.15.1\r\npandas: 1.0.3\r\nnumpy: 1.17.3\r\nscipy: 1.3.1\r\nnetCDF4: 1.4.2\r\npydap: None\r\nh5netcdf: None\r\nh5py: 2.10.0\r\nNio: None\r\nzarr: None\r\ncftime: 1.0.4.2\r\nnc_time_axis: None\r\nPseudoNetCDF: None\r\nrasterio: None\r\ncfgrib: None\r\niris: None\r\nbottleneck: None\r\ndask: 2.10.1\r\ndistributed: 2.10.0\r\nmatplotlib: 3.1.1\r\ncartopy: None\r\nseaborn: 0.10.0\r\nnumbagg: None\r\nsetuptools: 41.0.0\r\npip: 19.0.3\r\nconda: 4.8.3\r\npytest: 5.3.5\r\nIPython: 7.9.0\r\nsphinx: None\r\n\r\n\r\n
\r\n\n", + "golden_patch": "diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py\n--- a/xarray/core/dataarray.py\n+++ b/xarray/core/dataarray.py\n@@ -1961,7 +1961,7 @@ def to_unstacked_dataset(self, dim, level=0):\n # pull variables out of datarray\n data_dict = {}\n for k in variables:\n- data_dict[k] = self.sel({variable_dim: k}).squeeze(drop=True)\n+ data_dict[k] = self.sel({variable_dim: k}, drop=True).squeeze(drop=True)\n \n # unstacked dataset\n return Dataset(data_dict)\n", + "test_patch": "diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py\n--- a/xarray/tests/test_dataset.py\n+++ b/xarray/tests/test_dataset.py\n@@ -3031,6 +3031,14 @@ def test_to_stacked_array_dtype_dims(self):\n assert y.dims == (\"x\", \"features\")\n \n def test_to_stacked_array_to_unstacked_dataset(self):\n+\n+ # single dimension: regression test for GH4049\n+ arr = xr.DataArray(np.arange(3), coords=[(\"x\", [0, 1, 2])])\n+ data = xr.Dataset({\"a\": arr, \"b\": arr})\n+ stacked = data.to_stacked_array(\"y\", sample_dims=[\"x\"])\n+ unstacked = stacked.to_unstacked_dataset(\"y\")\n+ assert_identical(unstacked, data)\n+\n # make a two dimensional dataset\n a, b = create_test_stacked_array()\n D = xr.Dataset({\"a\": a, \"b\": b})\n", + "fail_to_pass": "[\"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_to_unstacked_dataset\"]", + "pass_to_pass": "[\"xarray/tests/test_dataset.py::TestDataset::test_repr\", \"xarray/tests/test_dataset.py::TestDataset::test_repr_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_repr_period_index\", \"xarray/tests/test_dataset.py::TestDataset::test_unicode_data\", \"xarray/tests/test_dataset.py::TestDataset::test_repr_nep18\", \"xarray/tests/test_dataset.py::TestDataset::test_info\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_invalid_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_1d\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_0d\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_deprecated\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_auto_align\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_pandas_sequence\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_pandas_single\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_compat\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_with_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_properties\", \"xarray/tests/test_dataset.py::TestDataset::test_asarray\", \"xarray/tests/test_dataset.py::TestDataset::test_get_index\", \"xarray/tests/test_dataset.py::TestDataset::test_attr_access\", \"xarray/tests/test_dataset.py::TestDataset::test_variable\", \"xarray/tests/test_dataset.py::TestDataset::test_modify_inplace\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_properties\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_modify\", \"xarray/tests/test_dataset.py::TestDataset::test_update_index\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_setitem_with_new_dimension\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_setitem_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_set\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_to_dataset\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_merge\", 
\"xarray/tests/test_dataset.py::TestDataset::test_coords_merge_mismatched_shape\", \"xarray/tests/test_dataset.py::TestDataset::test_data_vars_properties\", \"xarray/tests/test_dataset.py::TestDataset::test_equals_and_identical\", \"xarray/tests/test_dataset.py::TestDataset::test_equals_failures\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_equals\", \"xarray/tests/test_dataset.py::TestDataset::test_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_chunk\", \"xarray/tests/test_dataset.py::TestDataset::test_dask_is_lazy\", \"xarray/tests/test_dataset.py::TestDataset::test_isel\", \"xarray/tests/test_dataset.py::TestDataset::test_isel_fancy\", \"xarray/tests/test_dataset.py::TestDataset::test_isel_dataarray\", \"xarray/tests/test_dataset.py::TestDataset::test_sel\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_dataarray\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_dataarray_mindex\", \"xarray/tests/test_dataset.py::TestDataset::test_categorical_index\", \"xarray/tests/test_dataset.py::TestDataset::test_categorical_reindex\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_drop\", \"xarray/tests/test_dataset.py::TestDataset::test_isel_drop\", \"xarray/tests/test_dataset.py::TestDataset::test_head\", \"xarray/tests/test_dataset.py::TestDataset::test_tail\", \"xarray/tests/test_dataset.py::TestDataset::test_thin\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_fancy\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_method\", \"xarray/tests/test_dataset.py::TestDataset::test_loc\", \"xarray/tests/test_dataset.py::TestDataset::test_selection_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_like\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_like\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_warning\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_variables_copied\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_method\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_fill_value[fill_value0]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_fill_value[2]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_fill_value[2.0]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_like_fill_value[fill_value0]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_like_fill_value[2]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_like_fill_value[2.0]\", \"xarray/tests/test_dataset.py::TestDataset::test_align_fill_value[fill_value0]\", \"xarray/tests/test_dataset.py::TestDataset::test_align_fill_value[2]\", \"xarray/tests/test_dataset.py::TestDataset::test_align_fill_value[2.0]\", \"xarray/tests/test_dataset.py::TestDataset::test_align\", \"xarray/tests/test_dataset.py::TestDataset::test_align_exact\", \"xarray/tests/test_dataset.py::TestDataset::test_align_override\", \"xarray/tests/test_dataset.py::TestDataset::test_align_exclude\", \"xarray/tests/test_dataset.py::TestDataset::test_align_nocopy\", \"xarray/tests/test_dataset.py::TestDataset::test_align_indexes\", \"xarray/tests/test_dataset.py::TestDataset::test_align_non_unique\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_nocopy\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_exclude\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_misaligned\", 
\"xarray/tests/test_dataset.py::TestDataset::test_variable_indexing\", \"xarray/tests/test_dataset.py::TestDataset::test_drop_variables\", \"xarray/tests/test_dataset.py::TestDataset::test_drop_index_labels\", \"xarray/tests/test_dataset.py::TestDataset::test_drop_labels_by_keyword\", \"xarray/tests/test_dataset.py::TestDataset::test_drop_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_copy\", \"xarray/tests/test_dataset.py::TestDataset::test_copy_with_data\", \"xarray/tests/test_dataset.py::TestDataset::test_copy_with_data_errors\", \"xarray/tests/test_dataset.py::TestDataset::test_rename\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_old_name\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_same_name\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_inplace\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_vars\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_does_not_change_CFTimeIndex_type\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_does_not_change_DatetimeIndex_type\", \"xarray/tests/test_dataset.py::TestDataset::test_swap_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_error\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_int\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_existing_scalar_coord\", \"xarray/tests/test_dataset.py::TestDataset::test_isel_expand_dims_roundtrip\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_mixed_int_and_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_kwargs_python36plus\", \"xarray/tests/test_dataset.py::TestDataset::test_set_index\", \"xarray/tests/test_dataset.py::TestDataset::test_reset_index\", \"xarray/tests/test_dataset.py::TestDataset::test_reset_index_keep_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_reorder_levels\", \"xarray/tests/test_dataset.py::TestDataset::test_stack\", \"xarray/tests/test_dataset.py::TestDataset::test_unstack\", \"xarray/tests/test_dataset.py::TestDataset::test_unstack_errors\", \"xarray/tests/test_dataset.py::TestDataset::test_unstack_fill_value\", \"xarray/tests/test_dataset.py::TestDataset::test_unstack_sparse\", \"xarray/tests/test_dataset.py::TestDataset::test_stack_unstack_fast\", \"xarray/tests/test_dataset.py::TestDataset::test_stack_unstack_slow\", \"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_invalid_sample_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_name\", \"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_dtype_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_to_unstacked_dataset_different_dimension\", \"xarray/tests/test_dataset.py::TestDataset::test_update\", \"xarray/tests/test_dataset.py::TestDataset::test_update_overwrite_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_update_auto_align\", \"xarray/tests/test_dataset.py::TestDataset::test_getitem\", \"xarray/tests/test_dataset.py::TestDataset::test_getitem_hashable\", \"xarray/tests/test_dataset.py::TestDataset::test_virtual_variables_default_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_virtual_variables_time\", \"xarray/tests/test_dataset.py::TestDataset::test_virtual_variable_same_name\", 
\"xarray/tests/test_dataset.py::TestDataset::test_virtual_variable_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_time_season\", \"xarray/tests/test_dataset.py::TestDataset::test_slice_virtual_variable\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_pandas\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_auto_align\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_dimension_override\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_with_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_align_new_indexes\", \"xarray/tests/test_dataset.py::TestDataset::test_assign\", \"xarray/tests/test_dataset.py::TestDataset::test_assign_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_assign_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_assign_multiindex_level\", \"xarray/tests/test_dataset.py::TestDataset::test_merge_multiindex_level\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_original_non_unique_index\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_both_non_unique_index\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_multiindex_level\", \"xarray/tests/test_dataset.py::TestDataset::test_delitem\", \"xarray/tests/test_dataset.py::TestDataset::test_squeeze\", \"xarray/tests/test_dataset.py::TestDataset::test_squeeze_drop\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_returns_new_type\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_iter\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_errors\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_reduce\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_math\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_math_virtual\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_nan\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_order\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_and_first\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_min_count\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_by_mean_with_keep_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_loffset\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_by_mean_discarding_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_by_last_discarding_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_drop_nondim_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_old_api\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_ds_da_are_the_same\", \"xarray/tests/test_dataset.py::TestDataset::test_ds_resample_apply_func_args\", \"xarray/tests/test_dataset.py::TestDataset::test_to_array\", \"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_dataframe\", \"xarray/tests/test_dataset.py::TestDataset::test_from_dataframe_sparse\", \"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_empty_dataframe\", \"xarray/tests/test_dataset.py::TestDataset::test_from_dataframe_non_unique_columns\", \"xarray/tests/test_dataset.py::TestDataset::test_convert_dataframe_with_many_types_and_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_dict\", \"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_dict_with_time_dim\", 
\"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_dict_with_nan_nat\", \"xarray/tests/test_dataset.py::TestDataset::test_to_dict_with_numpy_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_pickle\", \"xarray/tests/test_dataset.py::TestDataset::test_lazy_load\", \"xarray/tests/test_dataset.py::TestDataset::test_dropna\", \"xarray/tests/test_dataset.py::TestDataset::test_fillna\", \"xarray/tests/test_dataset.py::TestDataset::test_where\", \"xarray/tests/test_dataset.py::TestDataset::test_where_other\", \"xarray/tests/test_dataset.py::TestDataset::test_where_drop\", \"xarray/tests/test_dataset.py::TestDataset::test_where_drop_empty\", \"xarray/tests/test_dataset.py::TestDataset::test_where_drop_no_indexes\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_mean_uint_dtype\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_bad_dim\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_cumsum\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_cumsum_test_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_non_numeric\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_strings\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_dtypes\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_keep_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_argmin\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_scalars\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_only_one_axis\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_no_axis\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_keepdims\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[0.25-True]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[0.25-False]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[q1-True]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[q1-False]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[q2-True]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[q2-False]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile_skipna[True]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile_skipna[False]\", \"xarray/tests/test_dataset.py::TestDataset::test_rank\", \"xarray/tests/test_dataset.py::TestDataset::test_count\", \"xarray/tests/test_dataset.py::TestDataset::test_map\", \"xarray/tests/test_dataset.py::TestDataset::test_apply_pending_deprecated_map\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_number_math\", \"xarray/tests/test_dataset.py::TestDataset::test_unary_ops\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_array_math\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_dataset_math\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_math_auto_align\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_math_errors\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_transpose\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_ellipsis_transpose_different_ordered_vars\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_retains_period_index_on_transpose\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_n1_simple\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_n1_label\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_n1\", 
\"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_n2\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_exception_n_neg\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_exception_label_str\", \"xarray/tests/test_dataset.py::TestDataset::test_shift[fill_value0]\", \"xarray/tests/test_dataset.py::TestDataset::test_shift[2]\", \"xarray/tests/test_dataset.py::TestDataset::test_shift[2.0]\", \"xarray/tests/test_dataset.py::TestDataset::test_roll_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_roll_no_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_roll_coords_none\", \"xarray/tests/test_dataset.py::TestDataset::test_roll_multidim\", \"xarray/tests/test_dataset.py::TestDataset::test_real_and_imag\", \"xarray/tests/test_dataset.py::TestDataset::test_setattr_raises\", \"xarray/tests/test_dataset.py::TestDataset::test_filter_by_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_binary_op_propagate_indexes\", \"xarray/tests/test_dataset.py::TestDataset::test_binary_op_join_setting\", \"xarray/tests/test_dataset.py::TestDataset::test_full_like\", \"xarray/tests/test_dataset.py::TestDataset::test_combine_first\", \"xarray/tests/test_dataset.py::TestDataset::test_sortby\", \"xarray/tests/test_dataset.py::TestDataset::test_attribute_access\", \"xarray/tests/test_dataset.py::TestDataset::test_ipython_key_completion\", \"xarray/tests/test_dataset.py::TestDataset::test_polyfit_output\", \"xarray/tests/test_dataset.py::TestDataset::test_pad\", \"xarray/tests/test_dataset.py::test_isin[test_elements0]\", \"xarray/tests/test_dataset.py::test_isin[test_elements1]\", \"xarray/tests/test_dataset.py::test_isin[test_elements2]\", \"xarray/tests/test_dataset.py::test_isin_dask[test_elements0]\", \"xarray/tests/test_dataset.py::test_isin_dask[test_elements1]\", \"xarray/tests/test_dataset.py::test_isin_dask[test_elements2]\", \"xarray/tests/test_dataset.py::test_isin_dataset\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords0]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords1]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords2]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords3]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords4]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords5]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords6]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords7]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords8]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords9]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords0]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords1]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords2]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords3]\", 
\"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords4]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords5]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords6]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords7]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords8]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords9]\", \"xarray/tests/test_dataset.py::test_error_message_on_set_supplied\", \"xarray/tests/test_dataset.py::test_constructor_raises_with_invalid_coords[unaligned_coords0]\", \"xarray/tests/test_dataset.py::test_dir_expected_attrs[None]\", \"xarray/tests/test_dataset.py::test_dir_non_string[None]\", \"xarray/tests/test_dataset.py::test_dir_unicode[None]\", \"xarray/tests/test_dataset.py::test_coarsen_absent_dims_error[1]\", \"xarray/tests/test_dataset.py::test_coarsen[1-trim-left-True]\", \"xarray/tests/test_dataset.py::test_coarsen[1-trim-left-False]\", \"xarray/tests/test_dataset.py::test_coarsen[1-pad-right-True]\", \"xarray/tests/test_dataset.py::test_coarsen[1-pad-right-False]\", \"xarray/tests/test_dataset.py::test_coarsen_coords[1-True]\", \"xarray/tests/test_dataset.py::test_coarsen_coords[1-False]\", \"xarray/tests/test_dataset.py::test_coarsen_coords_cftime\", \"xarray/tests/test_dataset.py::test_coarsen_keep_attrs\", \"xarray/tests/test_dataset.py::test_rolling_keep_attrs\", \"xarray/tests/test_dataset.py::test_rolling_properties[1]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-median]\", 
\"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-max]\", 
\"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-median]\", \"xarray/tests/test_dataset.py::test_rolling_exp[1]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-None-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-None-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-1-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-1-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-2-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-2-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-3-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-3-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-None-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-None-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-1-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-1-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-2-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-2-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-3-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-3-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-None-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-None-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-1-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-1-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-2-True]\", 
\"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-2-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-3-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-3-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-None-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-None-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-1-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-1-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-2-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-2-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-3-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-3-False]\", \"xarray/tests/test_dataset.py::test_rolling_construct[1-True]\", \"xarray/tests/test_dataset.py::test_rolling_construct[1-False]\", \"xarray/tests/test_dataset.py::test_rolling_construct[2-True]\", \"xarray/tests/test_dataset.py::test_rolling_construct[2-False]\", \"xarray/tests/test_dataset.py::test_rolling_construct[3-True]\", \"xarray/tests/test_dataset.py::test_rolling_construct[3-False]\", \"xarray/tests/test_dataset.py::test_rolling_construct[4-True]\", \"xarray/tests/test_dataset.py::test_rolling_construct[4-False]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-3-True-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-3-False-1]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-3-False-1]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-3-False-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-None-True-1]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-None-True-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-None-False-1]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-None-False-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-None-False-1]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_raise_no_warning_for_nan_in_binary_ops\", \"xarray/tests/test_dataset.py::test_differentiate[1-False]\", \"xarray/tests/test_dataset.py::test_differentiate[2-False]\", \"xarray/tests/test_dataset.py::test_differentiate_datetime[False]\", \"xarray/tests/test_dataset.py::test_differentiate_cftime[False]\", \"xarray/tests/test_dataset.py::test_integrate[True]\", \"xarray/tests/test_dataset.py::test_integrate[False]\", \"xarray/tests/test_dataset.py::test_trapz_datetime[np-True]\", \"xarray/tests/test_dataset.py::test_trapz_datetime[np-False]\", \"xarray/tests/test_dataset.py::test_trapz_datetime[cftime-True]\", \"xarray/tests/test_dataset.py::test_trapz_datetime[cftime-False]\", \"xarray/tests/test_dataset.py::test_no_dict\", \"xarray/tests/test_dataset.py::test_subclass_slots\", \"xarray/tests/test_dataset.py::test_weakref\"]", + "expected_spans": { + "xarray/core/dataarray.py": [ + "DataArray.to_unstacked_dataset" + ] + }, + "test_file_spans": { + "xarray/tests/test_dataset.py": [ + "TestDataset.test_to_stacked_array_to_unstacked_dataset" + ] + }, + "resolved_by": [ + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "xarray/core/dataarray.py": [ + "DataArray.to_unstacked_dataset" + ] + }, + "alternative_spans": { + "xarray/core/dataarray.py": [ + "DataArray.to_unstacked_dataset" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "xarray/core/dataarray.py": [ + "DataArray.to_unstacked_dataset" + ] + }, + "alternative_spans": { + "xarray/core/dataarray.py": [ + "DataArray.to_unstacked_dataset" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "xarray/core/dataarray.py": [ + "DataArray.to_unstacked_dataset" + ] + }, + "alternative_spans": { + "xarray/core/dataarray.py": [ + "DataArray.to_unstacked_dataset" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-12983", + "repo": "django/django", + "base_commit": "3bc4240d979812bd11365ede04c028ea13fdc8c6", + "problem_statement": "Make django.utils.text.slugify() strip dashes and underscores\nDescription\n\t \n\t\t(last modified by Elinaldo do Nascimento Monteiro)\n\t \nBug generation slug\nExample:\nfrom django.utils import text\ntext.slugify(\"___This is a test ---\")\noutput: ___this-is-a-test-\nImprovement after correction\nfrom django.utils import text\ntext.slugify(\"___This is a test ---\")\noutput: this-is-a-test\n\u200bPR\n", + "golden_patch": "diff --git a/django/utils/text.py b/django/utils/text.py\n--- 
a/django/utils/text.py\n+++ b/django/utils/text.py\n@@ -393,17 +393,18 @@ def unescape_string_literal(s):\n @keep_lazy_text\n def slugify(value, allow_unicode=False):\n \"\"\"\n- Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.\n- Remove characters that aren't alphanumerics, underscores, or hyphens.\n- Convert to lowercase. Also strip leading and trailing whitespace.\n+ Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated\n+ dashes to single dashes. Remove characters that aren't alphanumerics,\n+ underscores, or hyphens. Convert to lowercase. Also strip leading and\n+ trailing whitespace, dashes, and underscores.\n \"\"\"\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n- value = re.sub(r'[^\\w\\s-]', '', value.lower()).strip()\n- return re.sub(r'[-\\s]+', '-', value)\n+ value = re.sub(r'[^\\w\\s-]', '', value.lower())\n+ return re.sub(r'[-\\s]+', '-', value).strip('-_')\n \n \n def camel_case_to_spaces(value):\n", + "test_patch": "diff --git a/tests/utils_tests/test_text.py b/tests/utils_tests/test_text.py\n--- a/tests/utils_tests/test_text.py\n+++ b/tests/utils_tests/test_text.py\n@@ -192,6 +192,13 @@ def test_slugify(self):\n # given - expected - Unicode?\n ('Hello, World!', 'hello-world', False),\n ('spam & eggs', 'spam-eggs', False),\n+ (' multiple---dash and space ', 'multiple-dash-and-space', False),\n+ ('\\t whitespace-in-value \\n', 'whitespace-in-value', False),\n+ ('underscore_in-value', 'underscore_in-value', False),\n+ ('__strip__underscore-value___', 'strip__underscore-value', False),\n+ ('--strip-dash-value---', 'strip-dash-value', False),\n+ ('__strip-mixed-value---', 'strip-mixed-value', False),\n+ ('_ -strip-mixed-value _-', 'strip-mixed-value', False),\n ('spam & \u0131\u00e7\u00fc\u015f', 'spam-\u0131\u00e7\u00fc\u015f', True),\n ('foo \u0131\u00e7 bar', 'foo-\u0131\u00e7-bar', True),\n (' foo \u0131\u00e7 bar', 'foo-\u0131\u00e7-bar', True),\n", + "fail_to_pass": "[\"test_slugify (utils_tests.test_text.TestUtilsText)\"]", + "pass_to_pass": "[\"test_compress_sequence (utils_tests.test_text.TestUtilsText)\", \"test_format_lazy (utils_tests.test_text.TestUtilsText)\", \"test_get_text_list (utils_tests.test_text.TestUtilsText)\", \"test_get_valid_filename (utils_tests.test_text.TestUtilsText)\", \"test_normalize_newlines (utils_tests.test_text.TestUtilsText)\", \"test_phone2numeric (utils_tests.test_text.TestUtilsText)\", \"test_smart_split (utils_tests.test_text.TestUtilsText)\", \"test_truncate_chars (utils_tests.test_text.TestUtilsText)\", \"test_truncate_chars_html (utils_tests.test_text.TestUtilsText)\", \"test_truncate_html_words (utils_tests.test_text.TestUtilsText)\", \"test_truncate_words (utils_tests.test_text.TestUtilsText)\", \"test_unescape_entities (utils_tests.test_text.TestUtilsText)\", \"test_unescape_entities_deprecated (utils_tests.test_text.TestUtilsText)\", \"test_unescape_string_literal (utils_tests.test_text.TestUtilsText)\", \"test_wrap (utils_tests.test_text.TestUtilsText)\"]", + "expected_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "test_file_spans": { + "tests/utils_tests/test_text.py": [ + "TestUtilsText.test_slugify" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": 
"20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/utils/text.py": [ + "imports", + "capfirst", + "impl", + "wrap", + "Truncator.__init__", + "Truncator.add_truncation_text", + "Truncator.chars", + "Truncator._text_chars", + "Truncator.words", + "Truncator._truncate_html", + "get_valid_filename", + "get_text_list", + "phone2numeric", + "StreamingBuffer.read", + "compress_sequence", + "impl:11", + "smart_split", + "impl:13", + "unescape_entities", + "unescape_string_literal", + "slugify", + "camel_case_to_spaces" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "imports", + "capfirst", + "impl", + "wrap", + "Truncator.__init__", + "Truncator.add_truncation_text", + "Truncator.chars", + "Truncator._text_chars", + "Truncator.words", + "Truncator._truncate_html", + "get_valid_filename", + "get_text_list", + "phone2numeric", + "StreamingBuffer.read", + "compress_sequence", + "impl:11", + "smart_split", + "impl:13", + "unescape_entities", + "unescape_string_literal", + "slugify", + "camel_case_to_spaces" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + 
"alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ], + "tests/utils_tests/test_text.py": [ + "TestUtilsText.test_slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/utils/text.py": [ + "slugify", + "camel_case_to_spaces" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify", + "camel_case_to_spaces" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/utils/text.py": [ + "slugify" + ], + "tests/model_fields/test_slugfield.py": [] + }, + "alternative_spans": { + "django/utils/text.py": [ + "slugify" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "tests/utils_tests/test_text.py": [ + "TestUtilsText.test_slugify" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sphinx-doc__sphinx-7738", + "repo": "sphinx-doc/sphinx", + "base_commit": "c087d717f6ed183dd422359bf91210dc59689d63", + "problem_statement": "overescaped trailing underscore on attribute with napoleon\n**Describe the bug**\r\nAttribute name `hello_` shows up as `hello\\_` in the html (visible backslash) with napoleon.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\nempty `__init__.py`\r\n`a.py` contains\r\n```python\r\nclass A:\r\n \"\"\"\r\n Attributes\r\n ----------\r\n hello_: int\r\n hi\r\n \"\"\"\r\n pass\r\n```\r\nrun `sphinx-quickstart`\r\nadd `'sphinx.ext.autodoc', 'sphinx.ext.napoleon'` to extensions in conf.py.\r\nadd `.. autoclass:: a.A` to index.rst\r\nPYTHONPATH=. make clean html\r\nopen _build/html/index.html in web browser and see the ugly backslash.\r\n\r\n**Expected behavior**\r\nNo backslash, a similar output to what I get for\r\n```rst\r\n .. 
attribute:: hello_\r\n :type: int\r\n\r\n hi\r\n```\r\n(the type shows up differently as well, but that's not the point here)\r\nOlder versions like 2.4.3 look ok to me.\r\n\r\n**Environment info**\r\n- OS: Linux debian testing\r\n- Python version: 3.8.3\r\n- Sphinx version: 3.0.4\r\n- Sphinx extensions: sphinx.ext.autodoc, sphinx.ext.napoleon\r\n- Extra tools:\n", + "golden_patch": "diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py\n--- a/sphinx/ext/napoleon/docstring.py\n+++ b/sphinx/ext/napoleon/docstring.py\n@@ -318,7 +318,7 @@ def _dedent(self, lines: List[str], full: bool = False) -> List[str]:\n return [line[min_indent:] for line in lines]\n \n def _escape_args_and_kwargs(self, name: str) -> str:\n- if name.endswith('_'):\n+ if name.endswith('_') and getattr(self._config, 'strip_signature_backslash', False):\n name = name[:-1] + r'\\_'\n \n if name[:2] == '**':\n", + "test_patch": "diff --git a/tests/test_ext_napoleon_docstring.py b/tests/test_ext_napoleon_docstring.py\n--- a/tests/test_ext_napoleon_docstring.py\n+++ b/tests/test_ext_napoleon_docstring.py\n@@ -1394,6 +1394,26 @@ def test_underscore_in_attribute(self):\n Attributes\n ----------\n \n+arg_ : type\n+ some description\n+\"\"\"\n+\n+ expected = \"\"\"\n+:ivar arg_: some description\n+:vartype arg_: type\n+\"\"\"\n+\n+ config = Config(napoleon_use_ivar=True)\n+ app = mock.Mock()\n+ actual = str(NumpyDocstring(docstring, config, app, \"class\"))\n+\n+ self.assertEqual(expected, actual)\n+\n+ def test_underscore_in_attribute_strip_signature_backslash(self):\n+ docstring = \"\"\"\n+Attributes\n+----------\n+\n arg_ : type\n some description\n \"\"\"\n@@ -1404,6 +1424,7 @@ def test_underscore_in_attribute(self):\n \"\"\"\n \n config = Config(napoleon_use_ivar=True)\n+ config.strip_signature_backslash = True\n app = mock.Mock()\n actual = str(NumpyDocstring(docstring, config, app, \"class\"))\n \n", + "fail_to_pass": "[\"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_underscore_in_attribute\"]", + "pass_to_pass": "[\"tests/test_ext_napoleon_docstring.py::NamedtupleSubclassTest::test_attributes_docstring\", \"tests/test_ext_napoleon_docstring.py::InlineAttributeTest::test_class_data_member\", \"tests/test_ext_napoleon_docstring.py::InlineAttributeTest::test_class_data_member_inline\", \"tests/test_ext_napoleon_docstring.py::InlineAttributeTest::test_class_data_member_inline_no_type\", \"tests/test_ext_napoleon_docstring.py::InlineAttributeTest::test_class_data_member_inline_ref_in_type\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_attributes_with_class_reference\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_code_block_in_returns_section\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_colon_in_return_type\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_custom_generic_sections\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_docstrings\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_kwargs_in_arguments\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_list_in_parameter_description\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_noindex\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_parameters_with_class_reference\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_raises_types\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_section_header_formatting\", 
\"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_sphinx_admonitions\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_xrefs_in_return_type\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_colon_in_return_type\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_docstrings\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_keywords_with_types\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_list_in_parameter_description\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_parameters_with_class_reference\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_parameters_without_class_reference\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_raises_types\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_section_header_underline_length\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_see_also_refs\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_sphinx_admonitions\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_underscore_in_attribute_strip_signature_backslash\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_xrefs_in_return_type\"]", + "expected_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._escape_args_and_kwargs" + ] + }, + "test_file_spans": { + "tests/test_ext_napoleon_docstring.py": [ + "NumpyDocstringTest.test_underscore_in_attribute" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-19487", + "repo": "sympy/sympy", + "base_commit": "25fbcce5b1a4c7e3956e6062930f4a44ce95a632", + "problem_statement": "Rewrite sign as abs\nIn sympy the `sign` function is defined as\r\n```\r\n sign(z) := z / Abs(z)\r\n```\r\nfor all complex non-zero `z`. There should be a way to rewrite the sign in terms of `Abs` e.g.:\r\n```\r\n>>> sign(x).rewrite(Abs) \r\n x \r\n\u2500\u2500\u2500\r\n\u2502x\u2502\r\n```\r\nI'm not sure how the possibility of `x` being zero should be handled currently we have\r\n```\r\n>>> sign(0) \r\n0\r\n>>> 0 / Abs(0) \r\nnan\r\n```\r\nMaybe `sign(0)` should be `nan` as well. 
Otherwise maybe rewrite as Abs would have to be careful about the possibility of the arg being zero (that would make the rewrite fail in most cases).\n", + "golden_patch": "diff --git a/sympy/functions/elementary/complexes.py b/sympy/functions/elementary/complexes.py\n--- a/sympy/functions/elementary/complexes.py\n+++ b/sympy/functions/elementary/complexes.py\n@@ -394,6 +394,9 @@ def _eval_rewrite_as_Heaviside(self, arg, **kwargs):\n if arg.is_extended_real:\n return Heaviside(arg, H0=S(1)/2) * 2 - 1\n \n+ def _eval_rewrite_as_Abs(self, arg, **kwargs):\n+ return Piecewise((0, Eq(arg, 0)), (arg / Abs(arg), True))\n+\n def _eval_simplify(self, **kwargs):\n return self.func(self.args[0].factor()) # XXX include doit?\n \n", + "test_patch": "diff --git a/sympy/core/tests/test_subs.py b/sympy/core/tests/test_subs.py\n--- a/sympy/core/tests/test_subs.py\n+++ b/sympy/core/tests/test_subs.py\n@@ -855,3 +855,10 @@ def test_issue_17823():\n def test_issue_19326():\n x, y = [i(t) for i in map(Function, 'xy')]\n assert (x*y).subs({x: 1 + x, y: x}) == (1 + x)*x\n+\n+def test_issue_19558():\n+ e = (7*x*cos(x) - 12*log(x)**3)*(-log(x)**4 + 2*sin(x) + 1)**2/ \\\n+ (2*(x*cos(x) - 2*log(x)**3)*(3*log(x)**4 - 7*sin(x) + 3)**2)\n+\n+ assert e.subs(x, oo) == AccumBounds(-oo, oo)\n+ assert (sin(x) + cos(x)).subs(x, oo) == AccumBounds(-2, 2)\ndiff --git a/sympy/functions/elementary/tests/test_complexes.py b/sympy/functions/elementary/tests/test_complexes.py\n--- a/sympy/functions/elementary/tests/test_complexes.py\n+++ b/sympy/functions/elementary/tests/test_complexes.py\n@@ -4,7 +4,7 @@\n pi, Rational, re, S, sign, sin, sqrt, Symbol, symbols, transpose,\n zoo, exp_polar, Piecewise, Interval, comp, Integral, Matrix,\n ImmutableMatrix, SparseMatrix, ImmutableSparseMatrix, MatrixSymbol,\n- FunctionMatrix, Lambda, Derivative)\n+ FunctionMatrix, Lambda, Derivative, Eq)\n from sympy.core.expr import unchanged\n from sympy.core.function import ArgumentIndexError\n from sympy.testing.pytest import XFAIL, raises\n@@ -296,11 +296,14 @@ def test_sign():\n assert sign(Symbol('x', real=True, zero=False)).is_nonpositive is None\n \n x, y = Symbol('x', real=True), Symbol('y')\n+ f = Function('f')\n assert sign(x).rewrite(Piecewise) == \\\n Piecewise((1, x > 0), (-1, x < 0), (0, True))\n assert sign(y).rewrite(Piecewise) == sign(y)\n assert sign(x).rewrite(Heaviside) == 2*Heaviside(x, H0=S(1)/2) - 1\n assert sign(y).rewrite(Heaviside) == sign(y)\n+ assert sign(y).rewrite(Abs) == Piecewise((0, Eq(y, 0)), (y/Abs(y), True))\n+ assert sign(f(y)).rewrite(Abs) == Piecewise((0, Eq(f(y), 0)), (f(y)/Abs(f(y)), True))\n \n # evaluate what can be evaluated\n assert sign(exp_polar(I*pi)*pi) is S.NegativeOne\n", + "fail_to_pass": "[\"test_sign\"]", + "pass_to_pass": "[\"test_subs\", \"test_subs_Matrix\", \"test_subs_AccumBounds\", \"test_trigonometric\", \"test_powers\", \"test_logexppow\", \"test_bug\", \"test_subbug1\", \"test_subbug2\", \"test_dict_set\", \"test_dict_ambigous\", \"test_deriv_sub_bug3\", \"test_equality_subs1\", \"test_equality_subs2\", \"test_issue_3742\", \"test_subs_dict1\", \"test_mul\", \"test_subs_simple\", \"test_subs_constants\", \"test_subs_commutative\", \"test_subs_noncommutative\", \"test_subs_basic_funcs\", \"test_subs_wild\", \"test_subs_mixed\", \"test_division\", \"test_add\", \"test_subs_issue_4009\", \"test_functions_subs\", \"test_derivative_subs\", \"test_derivative_subs2\", \"test_derivative_subs3\", \"test_issue_5284\", \"test_subs_iter\", \"test_subs_dict\", \"test_no_arith_subs_on_floats\", 
\"test_issue_5651\", \"test_issue_6075\", \"test_issue_6079\", \"test_issue_4680\", \"test_issue_6158\", \"test_Function_subs\", \"test_simultaneous_subs\", \"test_issue_6419_6421\", \"test_issue_6559\", \"test_issue_5261\", \"test_issue_6923\", \"test_2arg_hack\", \"test_noncommutative_subs\", \"test_issue_2877\", \"test_issue_5910\", \"test_issue_5217\", \"test_issue_10829\", \"test_pow_eval_subs_no_cache\", \"test_RootOf_issue_10092\", \"test_issue_8886\", \"test_issue_12657\", \"test_recurse_Application_args\", \"test_Subs_subs\", \"test_issue_13333\", \"test_issue_15234\", \"test_issue_6976\", \"test_issue_11746\", \"test_issue_17823\", \"test_issue_19326\", \"test_re\", \"test_im\", \"test_as_real_imag\", \"test_Abs\", \"test_Abs_rewrite\", \"test_Abs_real\", \"test_Abs_properties\", \"test_abs\", \"test_arg\", \"test_arg_rewrite\", \"test_adjoint\", \"test_conjugate\", \"test_conjugate_transpose\", \"test_transpose\", \"test_polarify\", \"test_unpolarify\", \"test_issue_4035\", \"test_issue_3206\", \"test_issue_4754_derivative_conjugate\", \"test_derivatives_issue_4757\", \"test_issue_11413\", \"test_periodic_argument\", \"test_principal_branch\", \"test_issue_14216\", \"test_issue_14238\", \"test_zero_assumptions\"]", + "expected_spans": { + "sympy/functions/elementary/complexes.py": [ + "sign._eval_simplify" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_subs.py": [], + "sympy/functions/elementary/tests/test_complexes.py": [ + "imports", + "test_sign" + ] + }, + "resolved_by": [ + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/functions/elementary/complexes.py": [ + "sign._eval_simplify" + ], + "sympy/functions/elementary/tests/test_complexes.py": [ + "test_sign" + ] + }, + "alternative_spans": { + "sympy/functions/elementary/complexes.py": [ + "sign._eval_simplify" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13028", + "repo": "django/django", + "base_commit": "78ad4b4b0201003792bfdbf1a7781cbc9ee03539", + "problem_statement": "Queryset raises NotSupportedError when RHS has filterable=False attribute.\nDescription\n\t \n\t\t(last modified by Nicolas Baccelli)\n\t \nI'm migrating my app to django 3.0.7 and I hit a strange behavior using a model class with a field labeled filterable\nclass ProductMetaDataType(models.Model):\n\tlabel = models.CharField(max_length=255, unique=True, blank=False, null=False)\n\tfilterable = models.BooleanField(default=False, verbose_name=_(\"filterable\"))\n\tclass Meta:\n\t\tapp_label = \"adminpricing\"\n\t\tverbose_name = _(\"product meta data type\")\n\t\tverbose_name_plural = _(\"product meta data types\")\n\tdef __str__(self):\n\t\treturn self.label\nclass ProductMetaData(models.Model):\n\tid = models.BigAutoField(primary_key=True)\n\tproduct = models.ForeignKey(\n\t\tProduit, null=False, blank=False, on_delete=models.CASCADE\n\t)\n\tvalue = models.TextField(null=False, blank=False)\n\tmarketplace = models.ForeignKey(\n\t\tPlateforme, null=False, blank=False, on_delete=models.CASCADE\n\t)\n\tdate_created = models.DateTimeField(null=True, default=timezone.now)\n\tmetadata_type = models.ForeignKey(\n\t\tProductMetaDataType, null=False, blank=False, on_delete=models.CASCADE\n\t)\n\tclass Meta:\n\t\tapp_label = \"adminpricing\"\n\t\tverbose_name = _(\"product meta data\")\n\t\tverbose_name_plural = _(\"product meta datas\")\nError happened when filtering ProductMetaData with a metadata_type :\nProductMetaData.objects.filter(value=\"Dark Vador\", 
metadata_type=self.brand_metadata)\nError traceback :\nTraceback (most recent call last):\n File \"/backoffice/backoffice/adminpricing/tests/test_pw.py\", line 481, in test_checkpolicywarning_by_fields\n\tfor p in ProductMetaData.objects.filter(\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/manager.py\", line 82, in manager_method\n\treturn getattr(self.get_queryset(), name)(*args, **kwargs)\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/query.py\", line 904, in filter\n\treturn self._filter_or_exclude(False, *args, **kwargs)\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/query.py\", line 923, in _filter_or_exclude\n\tclone.query.add_q(Q(*args, **kwargs))\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/sql/query.py\", line 1351, in add_q\n\tclause, _ = self._add_q(q_object, self.used_aliases)\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/sql/query.py\", line 1378, in _add_q\n\tchild_clause, needed_inner = self.build_filter(\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/sql/query.py\", line 1264, in build_filter\n\tself.check_filterable(value)\n File \"/usr/local/lib/python3.8/site-packages/django/db/models/sql/query.py\", line 1131, in check_filterable\n\traise NotSupportedError(\ndjango.db.utils.NotSupportedError: ProductMetaDataType is disallowed in the filter clause.\nI changed label to filterable_test and it fixed this issue\nThis should be documented or fix.\n", + "golden_patch": "diff --git a/django/db/models/sql/query.py b/django/db/models/sql/query.py\n--- a/django/db/models/sql/query.py\n+++ b/django/db/models/sql/query.py\n@@ -1124,7 +1124,10 @@ def check_related_objects(self, field, value, opts):\n \n def check_filterable(self, expression):\n \"\"\"Raise an error if expression cannot be used in a WHERE clause.\"\"\"\n- if not getattr(expression, 'filterable', True):\n+ if (\n+ hasattr(expression, 'resolve_expression') and\n+ not getattr(expression, 'filterable', True)\n+ ):\n raise NotSupportedError(\n expression.__class__.__name__ + ' is disallowed in the filter '\n 'clause.'\n", + "test_patch": "diff --git a/tests/queries/models.py b/tests/queries/models.py\n--- a/tests/queries/models.py\n+++ b/tests/queries/models.py\n@@ -68,6 +68,7 @@ class ExtraInfo(models.Model):\n note = models.ForeignKey(Note, models.CASCADE, null=True)\n value = models.IntegerField(null=True)\n date = models.ForeignKey(DateTimePK, models.SET_NULL, null=True)\n+ filterable = models.BooleanField(default=True)\n \n class Meta:\n ordering = ['info']\ndiff --git a/tests/queries/tests.py b/tests/queries/tests.py\n--- a/tests/queries/tests.py\n+++ b/tests/queries/tests.py\n@@ -56,12 +56,12 @@ def setUpTestData(cls):\n \n # Create these out of order so that sorting by 'id' will be different to sorting\n # by 'info'. 
Helps detect some problems later.\n- cls.e2 = ExtraInfo.objects.create(info='e2', note=cls.n2, value=41)\n+ cls.e2 = ExtraInfo.objects.create(info='e2', note=cls.n2, value=41, filterable=False)\n e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42)\n \n cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)\n cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)\n- a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)\n+ cls.a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2)\n cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2)\n \n cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)\n@@ -77,7 +77,7 @@ def setUpTestData(cls):\n i4.tags.set([t4])\n \n cls.r1 = Report.objects.create(name='r1', creator=cls.a1)\n- Report.objects.create(name='r2', creator=a3)\n+ Report.objects.create(name='r2', creator=cls.a3)\n Report.objects.create(name='r3')\n \n # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering\n@@ -1210,6 +1210,12 @@ def test_excluded_intermediary_m2m_table_joined(self):\n [],\n )\n \n+ def test_field_with_filterable(self):\n+ self.assertSequenceEqual(\n+ Author.objects.filter(extra=self.e2),\n+ [self.a3, self.a4],\n+ )\n+\n \n class Queries2Tests(TestCase):\n @classmethod\n", + "fail_to_pass": "[\"test_field_with_filterable (queries.tests.Queries1Tests)\", \"test_ticket8439 (queries.tests.Queries1Tests)\"]", + "pass_to_pass": "[\"#13227 -- If a queryset is already evaluated, it can still be used as a query arg\", \"test_no_fields_cloning (queries.tests.CloneTests)\", \"test_no_model_options_cloning (queries.tests.CloneTests)\", \"test_no_extra_params (queries.tests.DefaultValuesInsertTest)\", \"test_ticket_7302 (queries.tests.EscapingTests)\", \"test_ticket7371 (queries.tests.CustomPkTests)\", \"test_ticket22023 (queries.tests.Queries3Tests)\", \"test_ticket7107 (queries.tests.Queries3Tests)\", \"test_ticket8683 (queries.tests.Queries3Tests)\", \"test_ticket_21203 (queries.tests.Ticket21203Tests)\", \"test_empty_string_promotion (queries.tests.EmptyStringPromotionTests)\", \"test_invalid_values (queries.tests.TestInvalidValuesRelation)\", \"test_emptyqueryset_values (queries.tests.EmptyQuerySetTests)\", \"test_ticket_19151 (queries.tests.EmptyQuerySetTests)\", \"test_values_subquery (queries.tests.EmptyQuerySetTests)\", \"test_invalid_order_by (queries.tests.QuerySetExceptionTests)\", \"test_invalid_queryset_model (queries.tests.QuerySetExceptionTests)\", \"test_iter_exceptions (queries.tests.QuerySetExceptionTests)\", \"test_evaluated_proxy_count (queries.tests.ProxyQueryCleanupTest)\", \"test_ticket_21879 (queries.tests.ReverseM2MCustomPkTests)\", \"test_in_list_limit (queries.tests.ConditionalTests)\", \"test_infinite_loop (queries.tests.ConditionalTests)\", \"test_empty_full_handling_conjunction (queries.tests.WhereNodeTest)\", \"test_empty_full_handling_disjunction (queries.tests.WhereNodeTest)\", \"test_empty_nodes (queries.tests.WhereNodeTest)\", \"test_ticket_24278 (queries.tests.TestTicket24279)\", \"test_ticket_24605 (queries.tests.TestTicket24605)\", \"test_ticket_19964 (queries.tests.RelabelCloneTest)\", \"test_ticket10432 (queries.tests.GeneratorExpressionTests)\", \"test_ticket7778 (queries.tests.SubclassFKTests)\", \"test_ticket7872 (queries.tests.DisjunctiveFilterTests)\", \"test_ticket8283 (queries.tests.DisjunctiveFilterTests)\", \"test_annotated_ordering (queries.tests.QuerysetOrderedTests)\", \"test_cleared_default_ordering (queries.tests.QuerysetOrderedTests)\", 
\"test_empty_queryset (queries.tests.QuerysetOrderedTests)\", \"test_explicit_ordering (queries.tests.QuerysetOrderedTests)\", \"test_no_default_or_explicit_ordering (queries.tests.QuerysetOrderedTests)\", \"test_order_by_extra (queries.tests.QuerysetOrderedTests)\", \"test_ticket14729 (queries.tests.RawQueriesTests)\", \"test_double_subquery_in (queries.tests.DoubleInSubqueryTests)\", \"test_tickets_3045_3288 (queries.tests.SelectRelatedTests)\", \"test_reverse_trimming (queries.tests.ReverseJoinTrimmingTest)\", \"test_exists (queries.tests.ExistsSql)\", \"test_ticket_18414 (queries.tests.ExistsSql)\", \"test_ticket_18785 (queries.tests.Ticket18785Tests)\", \"test_ticket_20101 (queries.tests.Ticket20101Tests)\", \"test_ticket_14056 (queries.tests.Ticket14056Tests)\", \"test_primary_key (queries.tests.IsNullTests)\", \"test_to_field (queries.tests.IsNullTests)\", \"test_ticket_20788 (queries.tests.Ticket20788Tests)\", \"test_ticket8597 (queries.tests.ComparisonTests)\", \"test_values_in_subquery (queries.tests.ValuesSubqueryTests)\", \"test_21001 (queries.tests.EmptyStringsAsNullTest)\", \"test_direct_exclude (queries.tests.EmptyStringsAsNullTest)\", \"test_joined_exclude (queries.tests.EmptyStringsAsNullTest)\", \"test_ticket15786 (queries.tests.Exclude15786)\", \"test_ticket_12807 (queries.tests.Ticket12807Tests)\", \"test_join_already_in_query (queries.tests.NullableRelOrderingTests)\", \"test_ticket10028 (queries.tests.NullableRelOrderingTests)\", \"test_ticket_21787 (queries.tests.ForeignKeyToBaseExcludeTests)\", \"test_exclude_plain (queries.tests.ExcludeTest17600)\", \"test_exclude_plain_distinct (queries.tests.ExcludeTest17600)\", \"test_exclude_with_q_is_equal_to_plain_exclude (queries.tests.ExcludeTest17600)\", \"test_exclude_with_q_is_equal_to_plain_exclude_variation (queries.tests.ExcludeTest17600)\", \"test_exclude_with_q_object_distinct (queries.tests.ExcludeTest17600)\", \"test_exclude_with_q_object_no_distinct (queries.tests.ExcludeTest17600)\", \"test_or_with_both_slice (queries.tests.QuerySetBitwiseOperationTests)\", \"test_or_with_both_slice_and_ordering (queries.tests.QuerySetBitwiseOperationTests)\", \"test_or_with_lhs_slice (queries.tests.QuerySetBitwiseOperationTests)\", \"test_or_with_rhs_slice (queries.tests.QuerySetBitwiseOperationTests)\", \"test_non_nullable_fk_not_promoted (queries.tests.ValuesJoinPromotionTests)\", \"test_ticket_21376 (queries.tests.ValuesJoinPromotionTests)\", \"test_values_no_promotion_for_existing (queries.tests.ValuesJoinPromotionTests)\", \"test_double_exclude (queries.tests.NullInExcludeTest)\", \"test_null_in_exclude_qs (queries.tests.NullInExcludeTest)\", \"test_correct_lookup (queries.tests.RelatedLookupTypeTests)\", \"test_values_queryset_lookup (queries.tests.RelatedLookupTypeTests)\", \"test_wrong_backward_lookup (queries.tests.RelatedLookupTypeTests)\", \"test_wrong_type_lookup (queries.tests.RelatedLookupTypeTests)\", \"test_exclude_many_to_many (queries.tests.ManyToManyExcludeTest)\", \"test_ticket_12823 (queries.tests.ManyToManyExcludeTest)\", \"test_ticket_20955 (queries.tests.Ticket20955Tests)\", \"test_ticket_22429 (queries.tests.Ticket22429Tests)\", \"test_ticket_23605 (queries.tests.Ticket23605Tests)\", \"test_fk_reuse (queries.tests.JoinReuseTest)\", \"test_fk_reuse_annotation (queries.tests.JoinReuseTest)\", \"test_fk_reuse_disjunction (queries.tests.JoinReuseTest)\", \"test_fk_reuse_order_by (queries.tests.JoinReuseTest)\", \"test_fk_reuse_select_related (queries.tests.JoinReuseTest)\", 
\"test_inverted_q_across_relations (queries.tests.JoinReuseTest)\", \"test_revfk_noreuse (queries.tests.JoinReuseTest)\", \"test_revo2o_reuse (queries.tests.JoinReuseTest)\", \"test_distinct_ordered_sliced_subquery_aggregation (queries.tests.Queries6Tests)\", \"test_multiple_columns_with_the_same_name_slice (queries.tests.Queries6Tests)\", \"test_nested_queries_sql (queries.tests.Queries6Tests)\", \"test_parallel_iterators (queries.tests.Queries6Tests)\", \"test_ticket3739 (queries.tests.Queries6Tests)\", \"test_ticket_11320 (queries.tests.Queries6Tests)\", \"test_tickets_8921_9188 (queries.tests.Queries6Tests)\", \"test_empty_resultset_sql (queries.tests.WeirdQuerysetSlicingTests)\", \"test_empty_sliced_subquery (queries.tests.WeirdQuerysetSlicingTests)\", \"test_empty_sliced_subquery_exclude (queries.tests.WeirdQuerysetSlicingTests)\", \"test_tickets_7698_10202 (queries.tests.WeirdQuerysetSlicingTests)\", \"test_zero_length_values_slicing (queries.tests.WeirdQuerysetSlicingTests)\", \"test_ticket12239 (queries.tests.Queries2Tests)\", \"test_ticket4289 (queries.tests.Queries2Tests)\", \"test_ticket7759 (queries.tests.Queries2Tests)\", \"test_can_combine_queries_using_and_and_or_operators (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_can_get_items_using_index_and_slice_notation (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_can_get_number_of_items_in_queryset_using_standard_len (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_invalid_index (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_slicing_can_slice_again_after_slicing (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_slicing_cannot_combine_queries_once_sliced (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_slicing_cannot_filter_queryset_once_sliced (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_slicing_cannot_reorder_queryset_once_sliced (queries.tests.QuerySetSupportsPythonIdioms)\", \"hint: inverting your ordering might do what you need\", \"test_slicing_with_steps_can_be_used (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_slicing_with_tests_is_not_lazy (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_slicing_without_step_is_lazy (queries.tests.QuerySetSupportsPythonIdioms)\", \"test_in_query (queries.tests.ToFieldTests)\", \"test_in_subquery (queries.tests.ToFieldTests)\", \"test_nested_in_subquery (queries.tests.ToFieldTests)\", \"test_recursive_fk (queries.tests.ToFieldTests)\", \"test_recursive_fk_reverse (queries.tests.ToFieldTests)\", \"test_reverse_in (queries.tests.ToFieldTests)\", \"test_single_object (queries.tests.ToFieldTests)\", \"test_single_object_reverse (queries.tests.ToFieldTests)\", \"test_AB_ACB (queries.tests.UnionTests)\", \"test_A_AB (queries.tests.UnionTests)\", \"test_A_AB2 (queries.tests.UnionTests)\", \"test_BAB_BAC (queries.tests.UnionTests)\", \"test_BAB_BACB (queries.tests.UnionTests)\", \"test_BA_BCA__BAB_BAC_BCA (queries.tests.UnionTests)\", \"test_extra_multiple_select_params_values_order_by (queries.tests.ValuesQuerysetTests)\", \"test_extra_select_params_values_order_in_extra (queries.tests.ValuesQuerysetTests)\", \"test_extra_values (queries.tests.ValuesQuerysetTests)\", \"test_extra_values_list (queries.tests.ValuesQuerysetTests)\", \"test_extra_values_order_in_extra (queries.tests.ValuesQuerysetTests)\", \"test_extra_values_order_multiple (queries.tests.ValuesQuerysetTests)\", \"test_extra_values_order_twice (queries.tests.ValuesQuerysetTests)\", \"test_field_error_values_list (queries.tests.ValuesQuerysetTests)\", 
\"test_flat_extra_values_list (queries.tests.ValuesQuerysetTests)\", \"test_flat_values_list (queries.tests.ValuesQuerysetTests)\", \"test_named_values_list_bad_field_name (queries.tests.ValuesQuerysetTests)\", \"test_named_values_list_expression (queries.tests.ValuesQuerysetTests)\", \"test_named_values_list_expression_with_default_alias (queries.tests.ValuesQuerysetTests)\", \"test_named_values_list_flat (queries.tests.ValuesQuerysetTests)\", \"test_named_values_list_with_fields (queries.tests.ValuesQuerysetTests)\", \"test_named_values_list_without_fields (queries.tests.ValuesQuerysetTests)\", \"test_disjunction_promotion1 (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion2 (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion3 (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion3_demote (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion4 (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion4_demote (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion5_demote (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion6 (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion7 (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion_fexpression (queries.tests.DisjunctionPromotionTests)\", \"test_disjunction_promotion_select_related (queries.tests.DisjunctionPromotionTests)\", \"test_exclude_reverse_fk_field_ref (queries.tests.ExcludeTests)\", \"test_exclude_with_circular_fk_relation (queries.tests.ExcludeTests)\", \"test_subquery_exclude_outerref (queries.tests.ExcludeTests)\", \"test_ticket14511 (queries.tests.ExcludeTests)\", \"test_to_field (queries.tests.ExcludeTests)\", \"test_extra_select_literal_percent_s (queries.tests.Queries5Tests)\", \"test_ordering (queries.tests.Queries5Tests)\", \"test_ticket5261 (queries.tests.Queries5Tests)\", \"test_ticket7045 (queries.tests.Queries5Tests)\", \"test_ticket7256 (queries.tests.Queries5Tests)\", \"test_ticket9848 (queries.tests.Queries5Tests)\", \"test_distinct_ordered_sliced_subquery (queries.tests.SubqueryTests)\", \"Subselects honor any manual ordering\", \"test_related_sliced_subquery (queries.tests.SubqueryTests)\", \"test_slice_subquery_and_query (queries.tests.SubqueryTests)\", \"Delete queries can safely contain sliced subqueries\", \"test_isnull_filter_promotion (queries.tests.NullJoinPromotionOrTest)\", \"test_null_join_demotion (queries.tests.NullJoinPromotionOrTest)\", \"test_ticket_17886 (queries.tests.NullJoinPromotionOrTest)\", \"test_ticket_21366 (queries.tests.NullJoinPromotionOrTest)\", \"test_ticket_21748 (queries.tests.NullJoinPromotionOrTest)\", \"test_ticket_21748_complex_filter (queries.tests.NullJoinPromotionOrTest)\", \"test_ticket_21748_double_negated_and (queries.tests.NullJoinPromotionOrTest)\", \"test_ticket_21748_double_negated_or (queries.tests.NullJoinPromotionOrTest)\", \"test_combine_join_reuse (queries.tests.Queries4Tests)\", \"test_filter_reverse_non_integer_pk (queries.tests.Queries4Tests)\", \"test_join_reuse_order (queries.tests.Queries4Tests)\", \"test_order_by_resetting (queries.tests.Queries4Tests)\", \"test_order_by_reverse_fk (queries.tests.Queries4Tests)\", \"test_ticket10181 (queries.tests.Queries4Tests)\", \"test_ticket11811 (queries.tests.Queries4Tests)\", \"test_ticket14876 (queries.tests.Queries4Tests)\", \"test_ticket15316_exclude_false (queries.tests.Queries4Tests)\", \"test_ticket15316_exclude_true 
(queries.tests.Queries4Tests)\", \"test_ticket15316_filter_false (queries.tests.Queries4Tests)\", \"test_ticket15316_filter_true (queries.tests.Queries4Tests)\", \"test_ticket15316_one2one_exclude_false (queries.tests.Queries4Tests)\", \"test_ticket15316_one2one_exclude_true (queries.tests.Queries4Tests)\", \"test_ticket15316_one2one_filter_false (queries.tests.Queries4Tests)\", \"test_ticket15316_one2one_filter_true (queries.tests.Queries4Tests)\", \"test_ticket24525 (queries.tests.Queries4Tests)\", \"test_ticket7095 (queries.tests.Queries4Tests)\", \"test_avoid_infinite_loop_on_too_many_subqueries (queries.tests.Queries1Tests)\", \"test_common_mixed_case_foreign_keys (queries.tests.Queries1Tests)\", \"test_deferred_load_qs_pickling (queries.tests.Queries1Tests)\", \"test_double_exclude (queries.tests.Queries1Tests)\", \"test_error_raised_on_filter_with_dictionary (queries.tests.Queries1Tests)\", \"test_exclude (queries.tests.Queries1Tests)\", \"test_exclude_in (queries.tests.Queries1Tests)\", \"test_excluded_intermediary_m2m_table_joined (queries.tests.Queries1Tests)\", \"test_get_clears_ordering (queries.tests.Queries1Tests)\", \"test_heterogeneous_qs_combination (queries.tests.Queries1Tests)\", \"test_lookup_constraint_fielderror (queries.tests.Queries1Tests)\", \"test_nested_exclude (queries.tests.Queries1Tests)\", \"test_order_by_join_unref (queries.tests.Queries1Tests)\", \"test_order_by_raw_column_alias_warning (queries.tests.Queries1Tests)\", \"test_order_by_rawsql (queries.tests.Queries1Tests)\", \"test_order_by_tables (queries.tests.Queries1Tests)\", \"test_reasonable_number_of_subq_aliases (queries.tests.Queries1Tests)\", \"test_subquery_condition (queries.tests.Queries1Tests)\", \"test_ticket10205 (queries.tests.Queries1Tests)\", \"test_ticket10432 (queries.tests.Queries1Tests)\", \"test_ticket1050 (queries.tests.Queries1Tests)\", \"test_ticket10742 (queries.tests.Queries1Tests)\", \"test_ticket17429 (queries.tests.Queries1Tests)\", \"test_ticket1801 (queries.tests.Queries1Tests)\", \"test_ticket19672 (queries.tests.Queries1Tests)\", \"test_ticket2091 (queries.tests.Queries1Tests)\", \"test_ticket2253 (queries.tests.Queries1Tests)\", \"test_ticket2306 (queries.tests.Queries1Tests)\", \"test_ticket2400 (queries.tests.Queries1Tests)\", \"test_ticket2496 (queries.tests.Queries1Tests)\", \"test_ticket3037 (queries.tests.Queries1Tests)\", \"test_ticket3141 (queries.tests.Queries1Tests)\", \"test_ticket4358 (queries.tests.Queries1Tests)\", \"test_ticket4464 (queries.tests.Queries1Tests)\", \"test_ticket4510 (queries.tests.Queries1Tests)\", \"test_ticket6074 (queries.tests.Queries1Tests)\", \"test_ticket6154 (queries.tests.Queries1Tests)\", \"test_ticket6981 (queries.tests.Queries1Tests)\", \"test_ticket7076 (queries.tests.Queries1Tests)\", \"test_ticket7096 (queries.tests.Queries1Tests)\", \"test_ticket7098 (queries.tests.Queries1Tests)\", \"test_ticket7155 (queries.tests.Queries1Tests)\", \"test_ticket7181 (queries.tests.Queries1Tests)\", \"test_ticket7235 (queries.tests.Queries1Tests)\", \"test_ticket7277 (queries.tests.Queries1Tests)\", \"test_ticket7323 (queries.tests.Queries1Tests)\", \"test_ticket7378 (queries.tests.Queries1Tests)\", \"test_ticket7791 (queries.tests.Queries1Tests)\", \"test_ticket7813 (queries.tests.Queries1Tests)\", \"test_ticket9411 (queries.tests.Queries1Tests)\", \"test_ticket9926 (queries.tests.Queries1Tests)\", \"test_ticket9985 (queries.tests.Queries1Tests)\", \"test_ticket9997 (queries.tests.Queries1Tests)\", \"test_ticket_10790_1 
(queries.tests.Queries1Tests)\", \"test_ticket_10790_2 (queries.tests.Queries1Tests)\", \"test_ticket_10790_3 (queries.tests.Queries1Tests)\", \"test_ticket_10790_4 (queries.tests.Queries1Tests)\", \"test_ticket_10790_5 (queries.tests.Queries1Tests)\", \"test_ticket_10790_6 (queries.tests.Queries1Tests)\", \"test_ticket_10790_7 (queries.tests.Queries1Tests)\", \"test_ticket_10790_8 (queries.tests.Queries1Tests)\", \"test_ticket_10790_combine (queries.tests.Queries1Tests)\", \"test_ticket_20250 (queries.tests.Queries1Tests)\", \"test_tickets_1878_2939 (queries.tests.Queries1Tests)\", \"test_tickets_2076_7256 (queries.tests.Queries1Tests)\", \"test_tickets_2080_3592 (queries.tests.Queries1Tests)\", \"test_tickets_2874_3002 (queries.tests.Queries1Tests)\", \"test_tickets_4088_4306 (queries.tests.Queries1Tests)\", \"test_tickets_5321_7070 (queries.tests.Queries1Tests)\", \"test_tickets_5324_6704 (queries.tests.Queries1Tests)\", \"test_tickets_6180_6203 (queries.tests.Queries1Tests)\", \"test_tickets_7087_12242 (queries.tests.Queries1Tests)\", \"test_tickets_7204_7506 (queries.tests.Queries1Tests)\", \"test_tickets_7448_7707 (queries.tests.Queries1Tests)\"]", + "expected_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + }, + "test_file_spans": { + "tests/queries/models.py": [ + "ExtraInfo" + ], + "tests/queries/tests.py": [ + "Queries1Tests.setUpTestData", + "Queries1Tests" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/models/sql/query.py": [ + "docstring", + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "docstring", + "Query.check_filterable" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/models/sql/query.py": [ + "imports", + "get_field_names_from_opts", + "impl:3", + "RawQuery.__init__", + "RawQuery.get_columns", + "RawQuery.__iter__", + "RawQuery.__repr__", + "RawQuery._execute_query", + "Query", + "Query.__init__", + "Query.has_select_fields", + "Query.get_compiler", + "Query.clone", + "Query.rewrite_cols", + "Query.get_aggregation", + "Query.has_results", + "Query.combine", + "Query.deferred_to_data", + "Query.table_alias", + "Query.promote_joins", + "Query.change_aliases", + "Query.bump_prefix", + "Query.count_active_tables", + "Query.join", + "Query.join_parent_model", + "Query.add_annotation", + "Query.resolve_expression", + "Query.get_external_cols", + "Query.resolve_lookup_value", + "Query.solve_lookup_type", + "Query.check_query_object_type", + "Query.check_related_objects", + "Query.check_filterable", + "Query.build_lookup", + "Query.try_transform", + "Query.build_filter", + "Query.add_q", + "Query._add_q", + "Query.build_filtered_relation_q", + "Query.add_filtered_relation", + "Query.names_to_path", + "Query.setup_joins", + "Query.trim_joins", + "Query.resolve_ref", + "Query.split_exclude", + "Query.set_limits", + "Query.has_limit_one", + "Query.add_fields", + "Query.add_ordering", + "Query.set_group_by", + 
"Query.add_extra", + "Query.clear_deferred_loading", + "Query.add_deferred_loading", + "Query.add_immediate_loading", + "Query.get_loaded_field_names", + "Query.set_values", + "Query.annotation_select", + "Query.extra_select", + "Query.trim_start", + "Query.is_nullable", + "JoinPromoter.__init__", + "JoinPromoter.update_join_types" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "imports", + "get_field_names_from_opts", + "impl:3", + "RawQuery.__init__", + "RawQuery.get_columns", + "RawQuery.__iter__", + "RawQuery.__repr__", + "RawQuery._execute_query", + "Query", + "Query.__init__", + "Query.has_select_fields", + "Query.get_compiler", + "Query.clone", + "Query.rewrite_cols", + "Query.get_aggregation", + "Query.has_results", + "Query.combine", + "Query.deferred_to_data", + "Query.table_alias", + "Query.promote_joins", + "Query.change_aliases", + "Query.bump_prefix", + "Query.count_active_tables", + "Query.join", + "Query.join_parent_model", + "Query.add_annotation", + "Query.resolve_expression", + "Query.get_external_cols", + "Query.resolve_lookup_value", + "Query.solve_lookup_type", + "Query.check_query_object_type", + "Query.check_related_objects", + "Query.check_filterable", + "Query.build_lookup", + "Query.try_transform", + "Query.build_filter", + "Query.add_q", + "Query._add_q", + "Query.build_filtered_relation_q", + "Query.add_filtered_relation", + "Query.names_to_path", + "Query.setup_joins", + "Query.trim_joins", + "Query.resolve_ref", + "Query.split_exclude", + "Query.set_limits", + "Query.has_limit_one", + "Query.add_fields", + "Query.add_ordering", + "Query.set_group_by", + "Query.add_extra", + "Query.clear_deferred_loading", + "Query.add_deferred_loading", + "Query.add_immediate_loading", + "Query.get_loaded_field_names", + "Query.set_values", + "Query.annotation_select", + "Query.extra_select", + "Query.trim_start", + "Query.is_nullable", + "JoinPromoter.__init__", + "JoinPromoter.update_join_types" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/db/models/sql/query.py": [ + "docstring", + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "docstring", + "Query.check_filterable" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + 
"django/db/models/sql/query.py": [ + "imports", + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "imports", + "Query.check_filterable" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.check_filterable" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13033", + "repo": "django/django", + "base_commit": "a59de6e89e8dc1f3e71c9a5a5bbceb373ea5247e", + "problem_statement": "Self referencing foreign key doesn't correctly order by a relation \"_id\" field.\nDescription\n\t\nInitially discovered on 2.2.10 but verified still happens on 3.0.6. Given the following models:\nclass OneModel(models.Model):\n\tclass Meta:\n\t\tordering = (\"-id\",)\n\tid = models.BigAutoField(primary_key=True)\n\troot = models.ForeignKey(\"OneModel\", on_delete=models.CASCADE, null=True)\n\toneval = models.BigIntegerField(null=True)\nclass TwoModel(models.Model):\n\tid = models.BigAutoField(primary_key=True)\n\trecord = models.ForeignKey(OneModel, on_delete=models.CASCADE)\n\ttwoval = models.BigIntegerField(null=True)\nThe following queryset gives unexpected results and appears to be an incorrect SQL query:\nqs = TwoModel.objects.filter(record__oneval__in=[1,2,3])\nqs = qs.order_by(\"record__root_id\")\nprint(qs.query)\nSELECT \"orion_twomodel\".\"id\", \"orion_twomodel\".\"record_id\", \"orion_twomodel\".\"twoval\" FROM \"orion_twomodel\" INNER JOIN \"orion_onemodel\" ON (\"orion_twomodel\".\"record_id\" = \"orion_onemodel\".\"id\") LEFT OUTER JOIN \"orion_onemodel\" T3 ON (\"orion_onemodel\".\"root_id\" = T3.\"id\") WHERE \"orion_onemodel\".\"oneval\" IN (1, 2, 3) ORDER BY T3.\"id\" DESC\nThe query has an unexpected DESCENDING sort. That appears to come from the default sort order on the OneModel class, but I would expect the order_by() to take prececence. The the query has two JOINS, which is unnecessary. It appears that, since OneModel.root is a foreign key to itself, that is causing it to do the unnecessary extra join. 
In fact, testing a model where root is a foreign key to a third model doesn't show the problem behavior.\nNote also that the queryset with order_by(\"record__root\") gives the exact same SQL.\nThis queryset gives correct results and what looks like a pretty optimal SQL:\nqs = TwoModel.objects.filter(record__oneval__in=[1,2,3])\nqs = qs.order_by(\"record__root__id\")\nprint(qs.query)\nSELECT \"orion_twomodel\".\"id\", \"orion_twomodel\".\"record_id\", \"orion_twomodel\".\"twoval\" FROM \"orion_twomodel\" INNER JOIN \"orion_onemodel\" ON (\"orion_twomodel\".\"record_id\" = \"orion_onemodel\".\"id\") WHERE \"orion_onemodel\".\"oneval\" IN (1, 2, 3) ORDER BY \"orion_onemodel\".\"root_id\" ASC\nSo is this a potential bug or a misunderstanding on my part?\nAnother queryset that works around the issue and gives a reasonable SQL query and expected results:\nqs = TwoModel.objects.filter(record__oneval__in=[1,2,3])\nqs = qs.annotate(root_id=F(\"record__root_id\"))\nqs = qs.order_by(\"root_id\")\nprint(qs.query)\nSELECT \"orion_twomodel\".\"id\", \"orion_twomodel\".\"record_id\", \"orion_twomodel\".\"twoval\" FROM \"orion_twomodel\" INNER JOIN \"orion_onemodel\" ON (\"orion_twomodel\".\"record_id\" = \"orion_onemodel\".\"id\") WHERE \"orion_onemodel\".\"oneval\" IN (1, 2, 3) ORDER BY \"orion_onemodel\".\"zero_id\" ASC\nASCENDING sort, and a single INNER JOIN, as I'd expect. That actually works for my use because I need that output column anyway.\nOne final oddity; with the original queryset but the inverted sort order_by():\nqs = TwoModel.objects.filter(record__oneval__in=[1,2,3])\nqs = qs.order_by(\"-record__root_id\")\nprint(qs.query)\nSELECT \"orion_twomodel\".\"id\", \"orion_twomodel\".\"record_id\", \"orion_twomodel\".\"twoval\" FROM \"orion_twomodel\" INNER JOIN \"orion_onemodel\" ON (\"orion_twomodel\".\"record_id\" = \"orion_onemodel\".\"id\") LEFT OUTER JOIN \"orion_onemodel\" T3 ON (\"orion_onemodel\".\"root_id\" = T3.\"id\") WHERE \"orion_onemodel\".\"oneval\" IN (1, 2, 3) ORDER BY T3.\"id\" ASC\nOne gets the query with the two JOINs but an ASCENDING sort order. I was not under the impression that sort orders are somehow relative to the class level sort order, eg: does specifing order_by(\"-record__root_id\") invert the class sort order? 
Testing that on a simple case doesn't show that behavior at all.\nThanks for any assistance and clarification.\n", + "golden_patch": "diff --git a/django/db/models/sql/compiler.py b/django/db/models/sql/compiler.py\n--- a/django/db/models/sql/compiler.py\n+++ b/django/db/models/sql/compiler.py\n@@ -727,7 +727,12 @@ def find_ordering_name(self, name, opts, alias=None, default_order='ASC',\n # If we get to this point and the field is a relation to another model,\n # append the default ordering for that model unless it is the pk\n # shortcut or the attribute name of the field that is specified.\n- if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name and name != 'pk':\n+ if (\n+ field.is_relation and\n+ opts.ordering and\n+ getattr(field, 'attname', None) != pieces[-1] and\n+ name != 'pk'\n+ ):\n # Firstly, avoid infinite loops.\n already_seen = already_seen or set()\n join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)\n", + "test_patch": "diff --git a/tests/ordering/models.py b/tests/ordering/models.py\n--- a/tests/ordering/models.py\n+++ b/tests/ordering/models.py\n@@ -18,6 +18,7 @@\n \n class Author(models.Model):\n name = models.CharField(max_length=63, null=True, blank=True)\n+ editor = models.ForeignKey('self', models.CASCADE, null=True)\n \n class Meta:\n ordering = ('-pk',)\ndiff --git a/tests/ordering/tests.py b/tests/ordering/tests.py\n--- a/tests/ordering/tests.py\n+++ b/tests/ordering/tests.py\n@@ -343,6 +343,22 @@ def test_order_by_fk_attname(self):\n attrgetter(\"headline\")\n )\n \n+ def test_order_by_self_referential_fk(self):\n+ self.a1.author = Author.objects.create(editor=self.author_1)\n+ self.a1.save()\n+ self.a2.author = Author.objects.create(editor=self.author_2)\n+ self.a2.save()\n+ self.assertQuerysetEqual(\n+ Article.objects.filter(author__isnull=False).order_by('author__editor'),\n+ ['Article 2', 'Article 1'],\n+ attrgetter('headline'),\n+ )\n+ self.assertQuerysetEqual(\n+ Article.objects.filter(author__isnull=False).order_by('author__editor_id'),\n+ ['Article 1', 'Article 2'],\n+ attrgetter('headline'),\n+ )\n+\n def test_order_by_f_expression(self):\n self.assertQuerysetEqual(\n Article.objects.order_by(F('headline')), [\n", + "fail_to_pass": "[\"test_order_by_self_referential_fk (ordering.tests.OrderingTests)\"]", + "pass_to_pass": "[\"test_default_ordering (ordering.tests.OrderingTests)\", \"F expressions can be used in Meta.ordering.\", \"test_default_ordering_override (ordering.tests.OrderingTests)\", \"test_extra_ordering (ordering.tests.OrderingTests)\", \"test_extra_ordering_quoting (ordering.tests.OrderingTests)\", \"test_extra_ordering_with_table_name (ordering.tests.OrderingTests)\", \"test_no_reordering_after_slicing (ordering.tests.OrderingTests)\", \"test_order_by_constant_value (ordering.tests.OrderingTests)\", \"test_order_by_constant_value_without_output_field (ordering.tests.OrderingTests)\", \"test_order_by_f_expression (ordering.tests.OrderingTests)\", \"test_order_by_f_expression_duplicates (ordering.tests.OrderingTests)\", \"test_order_by_fk_attname (ordering.tests.OrderingTests)\", \"test_order_by_nulls_first (ordering.tests.OrderingTests)\", \"test_order_by_nulls_first_and_last (ordering.tests.OrderingTests)\", \"test_order_by_nulls_last (ordering.tests.OrderingTests)\", \"test_order_by_override (ordering.tests.OrderingTests)\", \"test_order_by_pk (ordering.tests.OrderingTests)\", \"test_order_by_ptr_field_with_default_ordering_by_expression (ordering.tests.OrderingTests)\", 
\"test_orders_nulls_first_on_filtered_subquery (ordering.tests.OrderingTests)\", \"test_random_ordering (ordering.tests.OrderingTests)\", \"test_related_ordering_duplicate_table_reference (ordering.tests.OrderingTests)\", \"test_reverse_meta_ordering_pure (ordering.tests.OrderingTests)\", \"test_reverse_ordering_pure (ordering.tests.OrderingTests)\", \"test_reversed_ordering (ordering.tests.OrderingTests)\", \"test_stop_slicing (ordering.tests.OrderingTests)\", \"test_stop_start_slicing (ordering.tests.OrderingTests)\"]", + "expected_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.find_ordering_name" + ] + }, + "test_file_spans": { + "tests/ordering/models.py": [ + "Author" + ], + "tests/ordering/tests.py": [ + "OrderingTests.test_order_by_f_expression" + ] + }, + "resolved_by": [ + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.find_ordering_name" + ] + }, + "alternative_spans": { + "django/db/models/sql/compiler.py": [ + "SQLCompiler.find_ordering_name" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pytest-dev__pytest-7373", + "repo": "pytest-dev/pytest", + "base_commit": "7b77fc086aab8b3a8ebc890200371884555eea1e", + "problem_statement": "Incorrect caching of skipif/xfail string condition evaluation\nVersion: pytest 5.4.3, current master\r\n\r\npytest caches the evaluation of the string in e.g. `@pytest.mark.skipif(\"sys.platform == 'win32'\")`. The caching key is only the string itself (see `cached_eval` in `_pytest/mark/evaluate.py`). However, the evaluation also depends on the item's globals, so the caching can lead to incorrect results. Example:\r\n\r\n```py\r\n# test_module_1.py\r\nimport pytest\r\n\r\nskip = True\r\n\r\n@pytest.mark.skipif(\"skip\")\r\ndef test_should_skip():\r\n assert False\r\n```\r\n\r\n```py\r\n# test_module_2.py\r\nimport pytest\r\n\r\nskip = False\r\n\r\n@pytest.mark.skipif(\"skip\")\r\ndef test_should_not_skip():\r\n assert False\r\n```\r\n\r\nRunning `pytest test_module_1.py test_module_2.py`.\r\n\r\nExpected: `test_should_skip` is skipped, `test_should_not_skip` is not skipped.\r\n\r\nActual: both are skipped.\r\n\r\n---\r\n\r\nI think the most appropriate fix is to simply remove the caching, which I don't think is necessary really, and inline `cached_eval` into `MarkEvaluator._istrue`.\n", + "golden_patch": "diff --git a/src/_pytest/mark/evaluate.py b/src/_pytest/mark/evaluate.py\n--- a/src/_pytest/mark/evaluate.py\n+++ b/src/_pytest/mark/evaluate.py\n@@ -10,25 +10,14 @@\n from ..outcomes import fail\n from ..outcomes import TEST_OUTCOME\n from .structures import Mark\n-from _pytest.config import Config\n from _pytest.nodes import Item\n-from _pytest.store import StoreKey\n \n \n-evalcache_key = StoreKey[Dict[str, Any]]()\n+def compiled_eval(expr: str, d: Dict[str, object]) -> Any:\n+ import _pytest._code\n \n-\n-def cached_eval(config: Config, expr: str, d: Dict[str, object]) -> Any:\n- default = {} # type: Dict[str, object]\n- evalcache = config._store.setdefault(evalcache_key, default)\n- try:\n- return evalcache[expr]\n- except KeyError:\n- import _pytest._code\n-\n- exprcode = _pytest._code.compile(expr, mode=\"eval\")\n- evalcache[expr] = x = eval(exprcode, d)\n- return x\n+ exprcode = _pytest._code.compile(expr, mode=\"eval\")\n+ return eval(exprcode, d)\n \n \n class MarkEvaluator:\n@@ -98,7 +87,7 @@ def _istrue(self) -> bool:\n self.expr = expr\n if isinstance(expr, str):\n d = self._getglobals()\n- result = 
cached_eval(self.item.config, expr, d)\n+ result = compiled_eval(expr, d)\n else:\n if \"reason\" not in mark.kwargs:\n # XXX better be checked at collection time\n", + "test_patch": "diff --git a/testing/test_mark.py b/testing/test_mark.py\n--- a/testing/test_mark.py\n+++ b/testing/test_mark.py\n@@ -706,6 +706,36 @@ def test_1(parameter):\n reprec = testdir.inline_run()\n reprec.assertoutcome(skipped=1)\n \n+ def test_reevaluate_dynamic_expr(self, testdir):\n+ \"\"\"#7360\"\"\"\n+ py_file1 = testdir.makepyfile(\n+ test_reevaluate_dynamic_expr1=\"\"\"\n+ import pytest\n+\n+ skip = True\n+\n+ @pytest.mark.skipif(\"skip\")\n+ def test_should_skip():\n+ assert True\n+ \"\"\"\n+ )\n+ py_file2 = testdir.makepyfile(\n+ test_reevaluate_dynamic_expr2=\"\"\"\n+ import pytest\n+\n+ skip = False\n+\n+ @pytest.mark.skipif(\"skip\")\n+ def test_should_not_skip():\n+ assert True\n+ \"\"\"\n+ )\n+\n+ file_name1 = os.path.basename(py_file1.strpath)\n+ file_name2 = os.path.basename(py_file2.strpath)\n+ reprec = testdir.inline_run(file_name1, file_name2)\n+ reprec.assertoutcome(passed=1, skipped=1)\n+\n \n class TestKeywordSelection:\n def test_select_simple(self, testdir):\n", + "fail_to_pass": "[\"testing/test_mark.py::TestFunctional::test_reevaluate_dynamic_expr\"]", + "pass_to_pass": "[\"testing/test_mark.py::TestMark::test_pytest_exists_in_namespace_all[py.test-mark]\", \"testing/test_mark.py::TestMark::test_pytest_exists_in_namespace_all[py.test-param]\", \"testing/test_mark.py::TestMark::test_pytest_exists_in_namespace_all[pytest-mark]\", \"testing/test_mark.py::TestMark::test_pytest_exists_in_namespace_all[pytest-param]\", \"testing/test_mark.py::TestMark::test_pytest_mark_notcallable\", \"testing/test_mark.py::TestMark::test_mark_with_param\", \"testing/test_mark.py::TestMark::test_pytest_mark_name_starts_with_underscore\", \"testing/test_mark.py::TestMarkDecorator::test__eq__[lhs0-rhs0-True]\", \"testing/test_mark.py::TestMarkDecorator::test__eq__[lhs1-rhs1-False]\", \"testing/test_mark.py::TestMarkDecorator::test__eq__[lhs2-bar-False]\", \"testing/test_mark.py::TestMarkDecorator::test__eq__[foo-rhs3-False]\", \"testing/test_mark.py::TestMarkDecorator::test_aliases\", \"testing/test_mark.py::test_addmarker_order\", \"testing/test_mark.py::test_pytest_param_id_requires_string\", \"testing/test_mark.py::test_pytest_param_id_allows_none_or_string[None]\", \"testing/test_mark.py::test_pytest_param_id_allows_none_or_string[hello\", \"testing/test_mark.py::test_marked_class_run_twice\", \"testing/test_mark.py::test_ini_markers\", \"testing/test_mark.py::test_markers_option\", \"testing/test_mark.py::test_ini_markers_whitespace\", \"testing/test_mark.py::test_marker_without_description\", \"testing/test_mark.py::test_markers_option_with_plugin_in_current_dir\", \"testing/test_mark.py::test_mark_on_pseudo_function\", \"testing/test_mark.py::test_strict_prohibits_unregistered_markers[--strict-markers]\", \"testing/test_mark.py::test_strict_prohibits_unregistered_markers[--strict]\", \"testing/test_mark.py::test_mark_option[xyz-expected_passed0]\", \"testing/test_mark.py::test_mark_option[(((\", \"testing/test_mark.py::test_mark_option[not\", \"testing/test_mark.py::test_mark_option[xyz\", \"testing/test_mark.py::test_mark_option[xyz2-expected_passed4]\", \"testing/test_mark.py::test_mark_option_custom[interface-expected_passed0]\", \"testing/test_mark.py::test_mark_option_custom[not\", \"testing/test_mark.py::test_keyword_option_custom[interface-expected_passed0]\", 
\"testing/test_mark.py::test_keyword_option_custom[not\", \"testing/test_mark.py::test_keyword_option_custom[pass-expected_passed2]\", \"testing/test_mark.py::test_keyword_option_custom[1\", \"testing/test_mark.py::test_keyword_option_considers_mark\", \"testing/test_mark.py::test_keyword_option_parametrize[None-expected_passed0]\", \"testing/test_mark.py::test_keyword_option_parametrize[[1.3]-expected_passed1]\", \"testing/test_mark.py::test_keyword_option_parametrize[2-3-expected_passed2]\", \"testing/test_mark.py::test_parametrize_with_module\", \"testing/test_mark.py::test_keyword_option_wrong_arguments[foo\", \"testing/test_mark.py::test_keyword_option_wrong_arguments[(foo-at\", \"testing/test_mark.py::test_keyword_option_wrong_arguments[or\", \"testing/test_mark.py::test_keyword_option_wrong_arguments[not\", \"testing/test_mark.py::test_parametrized_collected_from_command_line\", \"testing/test_mark.py::test_parametrized_collect_with_wrong_args\", \"testing/test_mark.py::test_parametrized_with_kwargs\", \"testing/test_mark.py::test_parametrize_iterator\", \"testing/test_mark.py::TestFunctional::test_merging_markers_deep\", \"testing/test_mark.py::TestFunctional::test_mark_decorator_subclass_does_not_propagate_to_base\", \"testing/test_mark.py::TestFunctional::test_mark_should_not_pass_to_siebling_class\", \"testing/test_mark.py::TestFunctional::test_mark_decorator_baseclasses_merged\", \"testing/test_mark.py::TestFunctional::test_mark_closest\", \"testing/test_mark.py::TestFunctional::test_mark_with_wrong_marker\", \"testing/test_mark.py::TestFunctional::test_mark_dynamically_in_funcarg\", \"testing/test_mark.py::TestFunctional::test_no_marker_match_on_unmarked_names\", \"testing/test_mark.py::TestFunctional::test_keywords_at_node_level\", \"testing/test_mark.py::TestFunctional::test_keyword_added_for_session\", \"testing/test_mark.py::TestFunctional::test_mark_from_parameters\", \"testing/test_mark.py::TestKeywordSelection::test_select_simple\", \"testing/test_mark.py::TestKeywordSelection::test_select_extra_keywords[xxx]\", \"testing/test_mark.py::TestKeywordSelection::test_select_extra_keywords[xxx\", \"testing/test_mark.py::TestKeywordSelection::test_select_extra_keywords[TestClass]\", \"testing/test_mark.py::TestKeywordSelection::test_select_extra_keywords[TestClass\", \"testing/test_mark.py::TestKeywordSelection::test_select_starton\", \"testing/test_mark.py::TestKeywordSelection::test_keyword_extra\", \"testing/test_mark.py::TestKeywordSelection::test_no_magic_values[__]\", \"testing/test_mark.py::TestKeywordSelection::test_no_magic_values[+]\", \"testing/test_mark.py::TestKeywordSelection::test_no_magic_values[..]\", \"testing/test_mark.py::TestKeywordSelection::test_no_match_directories_outside_the_suite\", \"testing/test_mark.py::test_parameterset_for_parametrize_marks[None]\", \"testing/test_mark.py::test_parameterset_for_parametrize_marks[]\", \"testing/test_mark.py::test_parameterset_for_parametrize_marks[skip]\", \"testing/test_mark.py::test_parameterset_for_parametrize_marks[xfail]\", \"testing/test_mark.py::test_parameterset_for_fail_at_collect\", \"testing/test_mark.py::test_parameterset_for_parametrize_bad_markname\", \"testing/test_mark.py::test_mark_expressions_no_smear\", \"testing/test_mark.py::test_markers_from_parametrize\", \"testing/test_mark.py::test_marker_expr_eval_failure_handling[NOT\", \"testing/test_mark.py::test_marker_expr_eval_failure_handling[bogus/]\"]", + "expected_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + 
"MarkEvaluator._istrue" + ] + }, + "test_file_spans": { + "testing/test_mark.py": [ + "TestFunctional" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator.__init__", + "MarkEvaluator.__bool__", + "MarkEvaluator.wasvalid", + "MarkEvaluator._get_marks", + "MarkEvaluator.invalidraise", + "MarkEvaluator.istrue", + "MarkEvaluator._getglobals", + "MarkEvaluator._istrue", + "MarkEvaluator.getexplanation" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator.__init__", + "MarkEvaluator.__bool__", + "MarkEvaluator.wasvalid", + "MarkEvaluator._get_marks", + "MarkEvaluator.invalidraise", + "MarkEvaluator.istrue", + "MarkEvaluator._getglobals", + "MarkEvaluator._istrue", + "MarkEvaluator.getexplanation" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + 
"cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._getglobals", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._getglobals", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue", + "MarkEvaluator", + "MarkEvaluator.getexplanation" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue", + "MarkEvaluator", + "MarkEvaluator.getexplanation" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": 
{ + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + }, + "alternative_spans": { + "src/_pytest/mark/evaluate.py": [ + "imports", + "cached_eval", + "MarkEvaluator._istrue" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240524_opencsg_starship_gpt4", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240630_agentless_gpt4o", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240828_autose_mixed", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240530_autocoderover-v20240408", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval" + ] + } + }, + { + "run_name": "20240706_sima_gpt4o", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240623_moatless_claude35sonnet", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240808_RepoGraph_gpt4o", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240908_infant_gpt4o", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._getglobals", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240604_CodeR", + "spans": { + 
"src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240728_sweagent_gpt4o", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue", + "MarkEvaluator", + "MarkEvaluator.getexplanation" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20241016_IBM-SWE-1.0", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval" + ] + } + }, + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "src/_pytest/mark/evaluate.py": [ + "cached_eval", + "MarkEvaluator._istrue" + ] + } + } + ] + }, + { + "instance_id": "pytest-dev__pytest-7432", + "repo": "pytest-dev/pytest", + "base_commit": "e6e300e729dd33956e5448d8be9a0b1540b4e53a", + "problem_statement": "skipping: --runxfail breaks pytest.mark.skip location reporting\npytest versions: 5.4.x, current master\r\n\r\nWhen `@pytest.mark.skip`/`skipif` marks are used to skip a test, for example\r\n\r\n```py\r\nimport pytest\r\n@pytest.mark.skip\r\ndef test_skip_location() -> None:\r\n assert 0\r\n```\r\n\r\nthe expected skip location reported should point to the item itself, and this is indeed what happens when running with `pytest -rs`:\r\n\r\n```\r\nSKIPPED [1] test_it.py:3: unconditional skip\r\n```\r\n\r\nHowever, adding `pytest -rs --runxfail` breaks this:\r\n\r\n```\r\nSKIPPED [1] src/_pytest/skipping.py:238: unconditional skip\r\n```\r\n\r\nThe `--runxfail` is only about xfail and should not affect this at all.\r\n\r\n---\r\n\r\nHint: the bug is in `src/_pytest/skipping.py`, the `pytest_runtest_makereport` hook.\n", + "golden_patch": "diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py\n--- a/src/_pytest/skipping.py\n+++ b/src/_pytest/skipping.py\n@@ -291,7 +291,8 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):\n else:\n rep.outcome = \"passed\"\n rep.wasxfail = xfailed.reason\n- elif (\n+\n+ if (\n item._store.get(skipped_by_mark_key, True)\n and rep.skipped\n and type(rep.longrepr) is tuple\n", + "test_patch": "diff --git a/testing/test_skipping.py b/testing/test_skipping.py\n--- a/testing/test_skipping.py\n+++ b/testing/test_skipping.py\n@@ -235,6 +235,31 @@ def test_func2():\n [\"*def test_func():*\", \"*assert 0*\", \"*1 failed*1 pass*\"]\n )\n \n+ @pytest.mark.parametrize(\n+ \"test_input,expected\",\n+ [\n+ (\n+ [\"-rs\"],\n+ [\"SKIPPED [1] test_sample.py:2: unconditional skip\", \"*1 skipped*\"],\n+ ),\n+ (\n+ [\"-rs\", \"--runxfail\"],\n+ [\"SKIPPED [1] test_sample.py:2: unconditional skip\", \"*1 skipped*\"],\n+ ),\n+ 
],\n+ )\n+ def test_xfail_run_with_skip_mark(self, testdir, test_input, expected):\n+ testdir.makepyfile(\n+ test_sample=\"\"\"\n+ import pytest\n+ @pytest.mark.skip\n+ def test_skip_location() -> None:\n+ assert 0\n+ \"\"\"\n+ )\n+ result = testdir.runpytest(*test_input)\n+ result.stdout.fnmatch_lines(expected)\n+\n def test_xfail_evalfalse_but_fails(self, testdir):\n item = testdir.getitem(\n \"\"\"\n", + "fail_to_pass": "[\"testing/test_skipping.py::TestXFail::test_xfail_run_with_skip_mark[test_input1-expected1]\"]", + "pass_to_pass": "[\"testing/test_skipping.py::test_importorskip\", \"testing/test_skipping.py::TestEvaluation::test_no_marker\", \"testing/test_skipping.py::TestEvaluation::test_marked_xfail_no_args\", \"testing/test_skipping.py::TestEvaluation::test_marked_skipif_no_args\", \"testing/test_skipping.py::TestEvaluation::test_marked_one_arg\", \"testing/test_skipping.py::TestEvaluation::test_marked_one_arg_with_reason\", \"testing/test_skipping.py::TestEvaluation::test_marked_one_arg_twice\", \"testing/test_skipping.py::TestEvaluation::test_marked_one_arg_twice2\", \"testing/test_skipping.py::TestEvaluation::test_marked_skipif_with_boolean_without_reason\", \"testing/test_skipping.py::TestEvaluation::test_marked_skipif_with_invalid_boolean\", \"testing/test_skipping.py::TestEvaluation::test_skipif_class\", \"testing/test_skipping.py::TestXFail::test_xfail_simple[True]\", \"testing/test_skipping.py::TestXFail::test_xfail_simple[False]\", \"testing/test_skipping.py::TestXFail::test_xfail_xpassed\", \"testing/test_skipping.py::TestXFail::test_xfail_using_platform\", \"testing/test_skipping.py::TestXFail::test_xfail_xpassed_strict\", \"testing/test_skipping.py::TestXFail::test_xfail_run_anyway\", \"testing/test_skipping.py::TestXFail::test_xfail_run_with_skip_mark[test_input0-expected0]\", \"testing/test_skipping.py::TestXFail::test_xfail_evalfalse_but_fails\", \"testing/test_skipping.py::TestXFail::test_xfail_not_report_default\", \"testing/test_skipping.py::TestXFail::test_xfail_not_run_xfail_reporting\", \"testing/test_skipping.py::TestXFail::test_xfail_not_run_no_setup_run\", \"testing/test_skipping.py::TestXFail::test_xfail_xpass\", \"testing/test_skipping.py::TestXFail::test_xfail_imperative\", \"testing/test_skipping.py::TestXFail::test_xfail_imperative_in_setup_function\", \"testing/test_skipping.py::TestXFail::test_dynamic_xfail_no_run\", \"testing/test_skipping.py::TestXFail::test_dynamic_xfail_set_during_funcarg_setup\", \"testing/test_skipping.py::TestXFail::test_xfail_raises[TypeError-TypeError-*1\", \"testing/test_skipping.py::TestXFail::test_xfail_raises[(AttributeError,\", \"testing/test_skipping.py::TestXFail::test_xfail_raises[TypeError-IndexError-*1\", \"testing/test_skipping.py::TestXFail::test_strict_sanity\", \"testing/test_skipping.py::TestXFail::test_strict_xfail[True]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail[False]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail_condition[True]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail_condition[False]\", \"testing/test_skipping.py::TestXFail::test_xfail_condition_keyword[True]\", \"testing/test_skipping.py::TestXFail::test_xfail_condition_keyword[False]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail_default_from_file[true]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail_default_from_file[false]\", \"testing/test_skipping.py::TestXFailwithSetupTeardown::test_failing_setup_issue9\", 
\"testing/test_skipping.py::TestXFailwithSetupTeardown::test_failing_teardown_issue9\", \"testing/test_skipping.py::TestSkip::test_skip_class\", \"testing/test_skipping.py::TestSkip::test_skips_on_false_string\", \"testing/test_skipping.py::TestSkip::test_arg_as_reason\", \"testing/test_skipping.py::TestSkip::test_skip_no_reason\", \"testing/test_skipping.py::TestSkip::test_skip_with_reason\", \"testing/test_skipping.py::TestSkip::test_only_skips_marked_test\", \"testing/test_skipping.py::TestSkip::test_strict_and_skip\", \"testing/test_skipping.py::TestSkipif::test_skipif_conditional\", \"testing/test_skipping.py::TestSkipif::test_skipif_reporting[\\\"hasattr(sys,\", \"testing/test_skipping.py::TestSkipif::test_skipif_reporting[True,\", \"testing/test_skipping.py::TestSkipif::test_skipif_using_platform\", \"testing/test_skipping.py::TestSkipif::test_skipif_reporting_multiple[skipif-SKIP-skipped]\", \"testing/test_skipping.py::TestSkipif::test_skipif_reporting_multiple[xfail-XPASS-xpassed]\", \"testing/test_skipping.py::test_skip_not_report_default\", \"testing/test_skipping.py::test_skipif_class\", \"testing/test_skipping.py::test_skipped_reasons_functional\", \"testing/test_skipping.py::test_skipped_folding\", \"testing/test_skipping.py::test_reportchars\", \"testing/test_skipping.py::test_reportchars_error\", \"testing/test_skipping.py::test_reportchars_all\", \"testing/test_skipping.py::test_reportchars_all_error\", \"testing/test_skipping.py::test_errors_in_xfail_skip_expressions\", \"testing/test_skipping.py::test_xfail_skipif_with_globals\", \"testing/test_skipping.py::test_default_markers\", \"testing/test_skipping.py::test_xfail_test_setup_exception\", \"testing/test_skipping.py::test_imperativeskip_on_xfail_test\", \"testing/test_skipping.py::TestBooleanCondition::test_skipif\", \"testing/test_skipping.py::TestBooleanCondition::test_skipif_noreason\", \"testing/test_skipping.py::TestBooleanCondition::test_xfail\", \"testing/test_skipping.py::test_xfail_item\", \"testing/test_skipping.py::test_module_level_skip_error\", \"testing/test_skipping.py::test_module_level_skip_with_allow_module_level\", \"testing/test_skipping.py::test_invalid_skip_keyword_parameter\", \"testing/test_skipping.py::test_mark_xfail_item\", \"testing/test_skipping.py::test_summary_list_after_errors\", \"testing/test_skipping.py::test_relpath_rootdir\"]", + "expected_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "test_file_spans": { + "testing/test_skipping.py": [ + "TestXFail.test_xfail_evalfalse_but_fails" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport", + "pytest_report_teststatus" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport", + "pytest_report_teststatus" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + 
"pytest_runtest_makereport" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ], + "testing/test_skipping.py": [ + "imports" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": 
"django__django-13158", + "repo": "django/django", + "base_commit": "7af8f4127397279d19ef7c7899e93018274e2f9b", + "problem_statement": "QuerySet.none() on combined queries returns all results.\nDescription\n\t\nI came across this issue on Stack Overflow. I'm not 100% sure it's a bug, but it does seem strange. With this code (excuse the bizarre example filtering):\nclass Publication(models.Model):\n\tpass\nclass Article(models.Model):\n\tpublications = models.ManyToManyField(to=Publication, blank=True, null=True)\nclass ArticleForm(forms.ModelForm):\n\tpublications = forms.ModelMultipleChoiceField(\n\t\tPublication.objects.filter(id__lt=2) | Publication.objects.filter(id__gt=5),\n\t\trequired=False,\n\t)\n\tclass Meta:\n\t\tmodel = Article\n\t\tfields = [\"publications\"]\nclass ArticleAdmin(admin.ModelAdmin):\n\tform = ArticleForm\nThis works well. However, changing the ModelMultipleChoiceField queryset to use union() breaks things.\npublications = forms.ModelMultipleChoiceField(\n\tPublication.objects.filter(id__lt=2).union(\n\t\tPublication.objects.filter(id__gt=5)\n\t),\n\trequired=False,\n)\nThe form correctly shows only the matching objects. However, if you submit this form while empty (i.e. you didn't select any publications), ALL objects matching the queryset will be added. Using the OR query, NO objects are added, as I'd expect.\n", + "golden_patch": "diff --git a/django/db/models/sql/query.py b/django/db/models/sql/query.py\n--- a/django/db/models/sql/query.py\n+++ b/django/db/models/sql/query.py\n@@ -305,6 +305,7 @@ def clone(self):\n obj.annotation_select_mask = None\n else:\n obj.annotation_select_mask = self.annotation_select_mask.copy()\n+ obj.combined_queries = tuple(query.clone() for query in self.combined_queries)\n # _annotation_select_cache cannot be copied, as doing so breaks the\n # (necessary) state in which both annotations and\n # _annotation_select_cache point to the same underlying objects.\n@@ -1777,6 +1778,8 @@ def split_exclude(self, filter_expr, can_reuse, names_with_path):\n \n def set_empty(self):\n self.where.add(NothingNode(), AND)\n+ for query in self.combined_queries:\n+ query.set_empty()\n \n def is_empty(self):\n return any(isinstance(c, NothingNode) for c in self.where.children)\n", + "test_patch": "diff --git a/tests/queries/test_qs_combinators.py b/tests/queries/test_qs_combinators.py\n--- a/tests/queries/test_qs_combinators.py\n+++ b/tests/queries/test_qs_combinators.py\n@@ -51,6 +51,13 @@ def test_union_distinct(self):\n self.assertEqual(len(list(qs1.union(qs2, all=True))), 20)\n self.assertEqual(len(list(qs1.union(qs2))), 10)\n \n+ def test_union_none(self):\n+ qs1 = Number.objects.filter(num__lte=1)\n+ qs2 = Number.objects.filter(num__gte=8)\n+ qs3 = qs1.union(qs2)\n+ self.assertSequenceEqual(qs3.none(), [])\n+ self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False)\n+\n @skipUnlessDBFeature('supports_select_intersection')\n def test_intersection_with_empty_qs(self):\n qs1 = Number.objects.all()\n", + "fail_to_pass": "[\"test_union_none (queries.test_qs_combinators.QuerySetSetOperationTests)\"]", + "pass_to_pass": "[\"test_combining_multiple_models (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_count_difference (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_count_intersection (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_count_union (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_count_union_empty_result (queries.test_qs_combinators.QuerySetSetOperationTests)\", 
\"test_difference_with_empty_qs (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_difference_with_values (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_intersection_with_empty_qs (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_intersection_with_values (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_limits (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_order_by_same_type (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_order_raises_on_non_selected_column (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_ordering (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_ordering_by_alias (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_ordering_by_f_expression (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_ordering_by_f_expression_and_alias (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_qs_with_subcompound_qs (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_simple_difference (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_simple_intersection (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_simple_union (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_distinct (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_empty_qs (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_extra_and_values_list (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_two_annotated_values_list (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_values (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_values_list_and_order (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_union_with_values_list_on_annotated_and_unannotated (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_unsupported_operations_on_combined_qs (queries.test_qs_combinators.QuerySetSetOperationTests)\", \"test_unsupported_ordering_slicing_raises_db_error (queries.test_qs_combinators.QuerySetSetOperationTests)\"]", + "expected_spans": { + "django/db/models/sql/query.py": [ + "Query.clone", + "Query.set_empty" + ] + }, + "test_file_spans": { + "tests/queries/test_qs_combinators.py": [ + "QuerySetSetOperationTests.test_intersection_with_empty_qs" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/models/query.py": [ + "QuerySet.none" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/query.py": [ + "QuerySet.none" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/models/query.py": [ + "QuerySet.none" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/models/query.py": [ + "docstring", + "imports", + "BaseIterable.__init__", + "ModelIterable.__iter__", + "ValuesIterable.__iter__", + "ValuesListIterable", + "ValuesListIterable.__iter__", + "NamedValuesListIterable.create_namedtuple_class", + "NamedValuesListIterable.__iter__", + "FlatValuesListIterable.__iter__", + "QuerySet.__init__", + "QuerySet.as_manager", + "QuerySet:3", + "QuerySet.__getstate__", + "QuerySet.__setstate__", + "QuerySet.__repr__", + "QuerySet.__getitem__", + "QuerySet.__or__", + "QuerySet._iterator", + "QuerySet.iterator", + 
"QuerySet.aggregate", + "QuerySet.count", + "QuerySet.get", + "QuerySet.bulk_create", + "QuerySet.bulk_update", + "QuerySet.get_or_create", + "QuerySet.update_or_create", + "QuerySet._extract_model_params", + "QuerySet._earliest", + "QuerySet.last", + "QuerySet.in_bulk", + "QuerySet.delete", + "QuerySet.update", + "QuerySet._update", + "QuerySet._prefetch_related_objects", + "QuerySet.explain", + "QuerySet.raw", + "QuerySet.values_list", + "QuerySet.dates", + "QuerySet.datetimes", + "QuerySet.none", + "QuerySet._filter_or_exclude", + "QuerySet._combinator_query", + "QuerySet.union", + "QuerySet.intersection", + "QuerySet.difference", + "QuerySet.select_for_update", + "QuerySet.select_related", + "QuerySet.prefetch_related", + "QuerySet.annotate", + "QuerySet.order_by", + "QuerySet.distinct", + "QuerySet.extra", + "QuerySet.reverse", + "QuerySet.defer", + "QuerySet.only", + "QuerySet.using", + "QuerySet.db", + "QuerySet._insert", + "QuerySet._batched_insert", + "QuerySet._clone", + "QuerySet._merge_sanity_check", + "QuerySet.resolve_expression", + "QuerySet._validate_values_are_expressions", + "QuerySet._not_support_combined_queries", + "InstanceCheckMeta", + "RawQuerySet.__init__", + "RawQuerySet.resolve_model_init_order", + "RawQuerySet.prefetch_related", + "RawQuerySet._prefetch_related_objects", + "RawQuerySet._clone", + "RawQuerySet.iterator", + "RawQuerySet.__repr__", + "RawQuerySet.using", + "RawQuerySet.columns", + "Prefetch.__init__", + "Prefetch.__getstate__", + "prefetch_related_objects", + "get_prefetcher", + "prefetch_one_level", + "RelatedPopulator.__init__" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/query.py": [ + "QuerySet.none" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/query.py": [ + "QuerySet.none" + ], + "django/forms/models.py": [ + "ModelMultipleChoiceField.clean" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/query.py": [ + "QuerySet.none" + ], + "django/forms/models.py": [ + "ModelMultipleChoiceField.clean" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/query.py": [ + "QuerySet.none" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/query.py": [ + "QuerySet.none" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pytest-dev__pytest-7490", + "repo": "pytest-dev/pytest", + "base_commit": "7f7a36478abe7dd1fa993b115d22606aa0e35e88", + "problem_statement": "Pytest 6: Dynamically adding xfail marker in test no longer ignores failure\n\r\n\r\n## Description\r\n\r\nWith pytest 5.x, we can dynamically add an xfail to a test `request` object using `request.node.add_marker(mark)` (see example below). In 5.x this treated the failing test like a a test marked statically with an `xfail`. With 6.0.0rc0 it raises. \r\n\r\n## Versions\r\n\r\n
\r\n\r\n```\r\n$ pip list\r\nPackage Version Location \r\n----------------------------- ------------------------------- --------------------------------------------------------------\r\na 1.0 \r\naioftp 0.13.0 \r\naiohttp 3.6.2 \r\nalabaster 0.7.12 \r\napipkg 1.5 \r\naplus 0.11.0 \r\nappdirs 1.4.3 \r\nappnope 0.1.0 \r\narrow 0.15.7 \r\naspy.yaml 1.3.0 \r\nastropy 3.2.3 \r\nasv 0.4.1 \r\nasync-timeout 3.0.1 \r\natomicwrites 1.3.0 \r\nattrs 19.1.0 \r\naws-sam-translator 1.15.1 \r\naws-xray-sdk 0.95 \r\nBabel 2.7.0 \r\nbackcall 0.1.0 \r\nbinaryornot 0.4.4 \r\nblack 19.10b0 \r\nbleach 3.1.0 \r\nblurb 1.0.7 \r\nbokeh 1.3.4 \r\nboto 2.49.0 \r\nboto3 1.7.84 \r\nbotocore 1.10.84 \r\nbqplot 0.12.12 \r\nbranca 0.3.1 \r\ncachetools 4.1.0 \r\ncertifi 2019.9.11 \r\ncffi 1.13.2 \r\ncfgv 2.0.1 \r\ncfn-lint 0.25.0 \r\ncftime 1.0.4.2 \r\nchardet 3.0.4 \r\nClick 7.0 \r\nclick-plugins 1.1.1 \r\ncligj 0.5.0 \r\ncloudpickle 1.2.2 \r\ncolorama 0.4.3 \r\ncolorcet 2.0.2 \r\ncoloredlogs 14.0 \r\ncookiecutter 1.7.2 \r\ncookies 2.2.1 \r\ncoverage 4.5.4 \r\ncryptography 2.8 \r\ncycler 0.10.0 \r\nCython 3.0a5 \r\ncytoolz 0.10.1 \r\ndask 2.4.0 /Users/taugspurger/Envs/pandas-dev/lib/python3.7/site-packages\r\nDateTime 4.3 \r\ndecorator 4.4.0 \r\ndefusedxml 0.6.0 \r\nDeprecated 1.2.7 \r\ndistributed 2.4.0 \r\ndocker 4.1.0 \r\ndocutils 0.15.2 \r\necdsa 0.14.1 \r\nentrypoints 0.3 \r\net-xmlfile 1.0.1 \r\nexecnet 1.7.1 \r\nfastparquet 0.3.3 /Users/taugspurger/sandbox/fastparquet \r\nfeedparser 5.2.1 \r\nFiona 1.8.8 \r\nflake8 3.7.9 \r\nflake8-rst 0.7.1 \r\nfletcher 0.3.1 \r\nflit 2.1.0 \r\nflit-core 2.1.0 \r\nfsspec 0.7.4 \r\nfuture 0.18.2 \r\ngcsfs 0.6.2 \r\ngeopandas 0.6.0+1.g95b8e1a.dirty /Users/taugspurger/sandbox/geopandas \r\ngitdb2 2.0.5 \r\nGitPython 3.0.2 \r\ngoogle-auth 1.16.1 \r\ngoogle-auth-oauthlib 0.4.1 \r\ngraphviz 0.13 \r\nh5py 2.10.0 \r\nHeapDict 1.0.1 \r\nholoviews 1.12.6 \r\nhumanfriendly 8.1 \r\nhunter 3.1.3 \r\nhvplot 0.5.2 \r\nhypothesis 4.36.2 \r\nidentify 1.4.7 \r\nidna 2.8 \r\nimagesize 1.1.0 \r\nimportlib-metadata 0.23 \r\nimportlib-resources 1.0.2 \r\niniconfig 1.0.0 \r\nintake 0.5.3 \r\nipydatawidgets 4.0.1 \r\nipykernel 5.1.2 \r\nipyleaflet 0.13.0 \r\nipympl 0.5.6 \r\nipython 7.11.1 \r\nipython-genutils 0.2.0 \r\nipyvolume 0.5.2 \r\nipyvue 1.3.2 \r\nipyvuetify 1.4.0 \r\nipywebrtc 0.5.0 \r\nipywidgets 7.5.1 \r\nisort 4.3.21 \r\njdcal 1.4.1 \r\njedi 0.16.0 \r\nJinja2 2.11.2 \r\njinja2-time 0.2.0 \r\njmespath 0.9.4 \r\njoblib 0.14.1 \r\njson5 0.9.4 \r\njsondiff 1.1.1 \r\njsonpatch 1.24 \r\njsonpickle 1.2 \r\njsonpointer 2.0 \r\njsonschema 3.0.2 \r\njupyter 1.0.0 \r\njupyter-client 5.3.3 \r\njupyter-console 6.0.0 \r\njupyter-core 4.5.0 \r\njupyterlab 2.1.2 \r\njupyterlab-server 1.1.4 \r\nkiwisolver 1.1.0 \r\nline-profiler 2.1.1 \r\nllvmlite 0.33.0 \r\nlocket 0.2.0 /Users/taugspurger/sandbox/locket.py \r\nlxml 4.5.0 \r\nmanhole 1.6.0 \r\nMarkdown 3.1.1 \r\nMarkupSafe 1.1.1 \r\nmatplotlib 3.2.2 \r\nmccabe 0.6.1 \r\nmemory-profiler 0.55.0 \r\nmistune 0.8.4 \r\nmock 3.0.5 \r\nmore-itertools 7.2.0 \r\nmoto 1.3.6 \r\nmsgpack 0.6.2 \r\nmultidict 4.5.2 \r\nmunch 2.3.2 \r\nmypy 0.730 \r\nmypy-extensions 0.4.1 \r\nnbconvert 5.6.0 \r\nnbformat 4.4.0 \r\nnbsphinx 0.4.2 \r\nnest-asyncio 1.3.3 \r\nnodeenv 1.3.3 \r\nnotebook 6.0.1 \r\nnumexpr 2.7.1 \r\nnumpy 1.19.0 \r\nnumpydoc 1.0.0.dev0 \r\noauthlib 3.1.0 \r\nodfpy 1.4.0 \r\nopenpyxl 3.0.3 \r\npackaging 20.4 \r\npandas 1.1.0.dev0+1758.g035e1fe831 /Users/taugspurger/sandbox/pandas \r\npandas-sphinx-theme 0.0.1.dev0 /Users/taugspurger/sandbox/pandas-sphinx-theme 
\r\npandocfilters 1.4.2 \r\nparam 1.9.2 \r\nparfive 1.0.0 \r\nparso 0.6.0 \r\npartd 1.0.0 \r\npathspec 0.8.0 \r\npatsy 0.5.1 \r\npexpect 4.7.0 \r\npickleshare 0.7.5 \r\nPillow 6.1.0 \r\npip 20.0.2 \r\npluggy 0.13.0 \r\npoyo 0.5.0 \r\npre-commit 1.18.3 \r\nprogressbar2 3.51.3 \r\nprometheus-client 0.7.1 \r\nprompt-toolkit 2.0.9 \r\npsutil 5.6.3 \r\nptyprocess 0.6.0 \r\npy 1.9.0 \r\npyaml 20.4.0 \r\npyarrow 0.16.0 \r\npyasn1 0.4.7 \r\npyasn1-modules 0.2.8 \r\npycodestyle 2.5.0 \r\npycparser 2.19 \r\npycryptodome 3.9.8 \r\npyct 0.4.6 \r\npydata-sphinx-theme 0.1.1 \r\npydeps 1.9.0 \r\npyflakes 2.1.1 \r\nPyGithub 1.44.1 \r\nPygments 2.4.2 \r\nPyJWT 1.7.1 \r\npyparsing 2.4.2 \r\npyproj 2.4.0 \r\npyrsistent 0.15.4 \r\npytest 5.4.3 \r\npytest-asyncio 0.10.0 \r\npytest-cov 2.8.1 \r\npytest-cover 3.0.0 \r\npytest-forked 1.0.2 \r\npytest-repeat 0.8.0 \r\npytest-xdist 1.29.0 \r\npython-boilerplate 0.1.0 \r\npython-dateutil 2.8.0 \r\npython-jose 2.0.2 \r\npython-jsonrpc-server 0.3.2 \r\npython-language-server 0.31.4 \r\npython-slugify 4.0.1 \r\npython-utils 2.4.0 \r\npythreejs 2.2.0 \r\npytoml 0.1.21 \r\npytz 2019.2 \r\npyviz-comms 0.7.2 \r\nPyYAML 5.1.2 \r\npyzmq 18.1.0 \r\nqtconsole 4.5.5 \r\nregex 2020.6.8 \r\nrequests 2.24.0 \r\nrequests-oauthlib 1.3.0 \r\nresponses 0.10.6 \r\nrsa 4.0 \r\nrstcheck 3.3.1 \r\ns3fs 0.4.2 \r\ns3transfer 0.1.13 \r\nscikit-learn 0.22.2.post1 \r\nscipy 1.3.1 \r\nseaborn 0.9.0 \r\nSend2Trash 1.5.0 \r\nsetuptools 49.2.0 \r\nShapely 1.6.4.post2 \r\nsix 1.12.0 \r\nsmmap2 2.0.5 \r\nsnakeviz 2.0.1 \r\nsnowballstemmer 1.9.1 \r\nsortedcontainers 2.1.0 \r\nsparse 0.10.0 \r\nSphinx 3.1.1 \r\nsphinxcontrib-applehelp 1.0.2 \r\nsphinxcontrib-devhelp 1.0.2 \r\nsphinxcontrib-htmlhelp 1.0.3 \r\nsphinxcontrib-jsmath 1.0.1 \r\nsphinxcontrib-qthelp 1.0.3 \r\nsphinxcontrib-serializinghtml 1.1.4 \r\nsphinxcontrib-websupport 1.1.2 \r\nsphinxcontrib.youtube 0.1.2 \r\nSQLAlchemy 1.3.11 \r\nsshpubkeys 3.1.0 \r\nstatsmodels 0.10.2 \r\nstdlib-list 0.6.0 \r\nsunpy 1.1.dev518+gcad2d473f.d20191103 /Users/taugspurger/sandbox/sunpy \r\ntables 3.6.1 \r\ntabulate 0.8.6 \r\ntblib 1.4.0 \r\nterminado 0.8.2 \r\ntest 1.0.0 \r\ntestpath 0.4.2 \r\ntext-unidecode 1.3 \r\nthrift 0.13.0 \r\ntoml 0.10.0 \r\ntoolz 0.10.0 \r\ntornado 6.0.3 \r\ntqdm 4.37.0 \r\ntraitlets 4.3.2 \r\ntraittypes 0.2.1 \r\ntyped-ast 1.4.0 \r\ntyping-extensions 3.7.4 \r\nujson 1.35 \r\nurllib3 1.25.5 \r\nvaex 3.0.0 \r\nvaex-arrow 0.5.1 \r\nvaex-astro 0.7.0 \r\nvaex-core 2.0.2 \r\nvaex-hdf5 0.6.0 \r\nvaex-jupyter 0.5.1.post0 \r\nvaex-ml 0.9.0 \r\nvaex-server 0.3.1 \r\nvaex-viz 0.4.0 \r\nvirtualenv 16.7.5 \r\nwcwidth 0.1.7 \r\nwebencodings 0.5.1 \r\nwebsocket-client 0.56.0 \r\nWerkzeug 0.16.0 \r\nwheel 0.34.2 \r\nwidgetsnbextension 3.5.1 \r\nwrapt 1.11.2 \r\nxarray 0.14.1+36.gb3d3b448 /Users/taugspurger/sandbox/xarray \r\nxlwt 1.3.0 \r\nxmltodict 0.12.0 \r\nyarl 1.3.0 \r\nzict 1.0.0 \r\nzipp 0.6.0 \r\nzope.interface 4.7.1 \r\n```\r\n\r\n
\r\n\r\n- [ ] pytest and operating system versions\r\n\r\nPytest 6.0.1rc0 and MacOS 10.14.5\r\n\r\n```python\r\n# file: test_foo.py\r\nimport pytest\r\n\r\n\r\ndef test_xfail_test(request):\r\n mark = pytest.mark.xfail(reason=\"xfail\")\r\n request.node.add_marker(mark)\r\n assert 0\r\n```\r\n\r\nWith 5.4.3\r\n\r\n```\r\n\r\n$ pytest -rsx test_foo.py\r\n=============================================================================== test session starts ================================================================================\r\nplatform darwin -- Python 3.7.6, pytest-5.4.3, py-1.9.0, pluggy-0.13.0\r\nhypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/Users/taugspurger/sandbox/.hypothesis/examples')\r\nrootdir: /Users/taugspurger/sandbox\r\nplugins: xdist-1.29.0, hypothesis-4.36.2, forked-1.0.2, repeat-0.8.0, asyncio-0.10.0, cov-2.8.1\r\ncollected 1 item\r\n\r\ntest_foo.py x [100%]\r\n\r\n============================================================================= short test summary info ==============================================================================\r\nXFAIL test_foo.py::test_xfail_test\r\n xfail\r\n================================================================================ 1 xfailed in 0.07s ================================================================================\r\n```\r\n\r\nWith 6.0.0rc0\r\n\r\n```\r\n$ pytest -rsx test_foo.py\r\n=============================================================================== test session starts ================================================================================\r\nplatform darwin -- Python 3.7.6, pytest-6.0.0rc1, py-1.9.0, pluggy-0.13.0\r\nhypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('/Users/taugspurger/sandbox/.hypothesis/examples')\r\nrootdir: /Users/taugspurger/sandbox\r\nplugins: xdist-1.29.0, hypothesis-4.36.2, forked-1.0.2, repeat-0.8.0, asyncio-0.10.0, cov-2.8.1\r\ncollected 1 item\r\n\r\ntest_foo.py F [100%]\r\n\r\n===================================================================================== FAILURES =====================================================================================\r\n_________________________________________________________________________________ test_xfail_test __________________________________________________________________________________\r\n\r\nrequest = >\r\n\r\n def test_xfail_test(request):\r\n mark = pytest.mark.xfail(reason=\"xfail\")\r\n request.node.add_marker(mark)\r\n> assert 0\r\nE assert 0\r\n\r\ntest_foo.py:7: AssertionError\r\n```\r\n\n", + "golden_patch": "diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py\n--- a/src/_pytest/skipping.py\n+++ b/src/_pytest/skipping.py\n@@ -231,17 +231,14 @@ def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:\n \n @hookimpl(tryfirst=True)\n def pytest_runtest_setup(item: Item) -> None:\n- item._store[skipped_by_mark_key] = False\n-\n skipped = evaluate_skip_marks(item)\n+ item._store[skipped_by_mark_key] = skipped is not None\n if skipped:\n- item._store[skipped_by_mark_key] = True\n skip(skipped.reason)\n \n- if not item.config.option.runxfail:\n- item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)\n- if xfailed and not xfailed.run:\n- xfail(\"[NOTRUN] \" + xfailed.reason)\n+ item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)\n+ if xfailed and not item.config.option.runxfail and not xfailed.run:\n+ xfail(\"[NOTRUN] \" + xfailed.reason)\n \n \n @hookimpl(hookwrapper=True)\n@@ -250,12 +247,16 @@ def pytest_runtest_call(item: 
Item) -> Generator[None, None, None]:\n if xfailed is None:\n item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)\n \n- if not item.config.option.runxfail:\n- if xfailed and not xfailed.run:\n- xfail(\"[NOTRUN] \" + xfailed.reason)\n+ if xfailed and not item.config.option.runxfail and not xfailed.run:\n+ xfail(\"[NOTRUN] \" + xfailed.reason)\n \n yield\n \n+ # The test run may have added an xfail mark dynamically.\n+ xfailed = item._store.get(xfailed_key, None)\n+ if xfailed is None:\n+ item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)\n+\n \n @hookimpl(hookwrapper=True)\n def pytest_runtest_makereport(item: Item, call: CallInfo[None]):\n", + "test_patch": "diff --git a/testing/test_skipping.py b/testing/test_skipping.py\n--- a/testing/test_skipping.py\n+++ b/testing/test_skipping.py\n@@ -1,6 +1,7 @@\n import sys\n \n import pytest\n+from _pytest.pytester import Testdir\n from _pytest.runner import runtestprotocol\n from _pytest.skipping import evaluate_skip_marks\n from _pytest.skipping import evaluate_xfail_marks\n@@ -425,6 +426,33 @@ def test_this2(arg):\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"*1 xfailed*\"])\n \n+ def test_dynamic_xfail_set_during_runtest_failed(self, testdir: Testdir) -> None:\n+ # Issue #7486.\n+ p = testdir.makepyfile(\n+ \"\"\"\n+ import pytest\n+ def test_this(request):\n+ request.node.add_marker(pytest.mark.xfail(reason=\"xfail\"))\n+ assert 0\n+ \"\"\"\n+ )\n+ result = testdir.runpytest(p)\n+ result.assert_outcomes(xfailed=1)\n+\n+ def test_dynamic_xfail_set_during_runtest_passed_strict(\n+ self, testdir: Testdir\n+ ) -> None:\n+ # Issue #7486.\n+ p = testdir.makepyfile(\n+ \"\"\"\n+ import pytest\n+ def test_this(request):\n+ request.node.add_marker(pytest.mark.xfail(reason=\"xfail\", strict=True))\n+ \"\"\"\n+ )\n+ result = testdir.runpytest(p)\n+ result.assert_outcomes(failed=1)\n+\n @pytest.mark.parametrize(\n \"expected, actual, matchline\",\n [\n", + "fail_to_pass": "[\"testing/test_skipping.py::TestXFail::test_dynamic_xfail_set_during_runtest_failed\", \"testing/test_skipping.py::TestXFail::test_dynamic_xfail_set_during_runtest_passed_strict\"]", + "pass_to_pass": "[\"testing/test_skipping.py::test_importorskip\", \"testing/test_skipping.py::TestEvaluation::test_no_marker\", \"testing/test_skipping.py::TestEvaluation::test_marked_xfail_no_args\", \"testing/test_skipping.py::TestEvaluation::test_marked_skipif_no_args\", \"testing/test_skipping.py::TestEvaluation::test_marked_one_arg\", \"testing/test_skipping.py::TestEvaluation::test_marked_one_arg_with_reason\", \"testing/test_skipping.py::TestEvaluation::test_marked_one_arg_twice\", \"testing/test_skipping.py::TestEvaluation::test_marked_one_arg_twice2\", \"testing/test_skipping.py::TestEvaluation::test_marked_skipif_with_boolean_without_reason\", \"testing/test_skipping.py::TestEvaluation::test_marked_skipif_with_invalid_boolean\", \"testing/test_skipping.py::TestEvaluation::test_skipif_class\", \"testing/test_skipping.py::TestXFail::test_xfail_simple[True]\", \"testing/test_skipping.py::TestXFail::test_xfail_simple[False]\", \"testing/test_skipping.py::TestXFail::test_xfail_xpassed\", \"testing/test_skipping.py::TestXFail::test_xfail_using_platform\", \"testing/test_skipping.py::TestXFail::test_xfail_xpassed_strict\", \"testing/test_skipping.py::TestXFail::test_xfail_run_anyway\", \"testing/test_skipping.py::TestXFail::test_xfail_run_with_skip_mark[test_input0-expected0]\", 
\"testing/test_skipping.py::TestXFail::test_xfail_run_with_skip_mark[test_input1-expected1]\", \"testing/test_skipping.py::TestXFail::test_xfail_evalfalse_but_fails\", \"testing/test_skipping.py::TestXFail::test_xfail_not_report_default\", \"testing/test_skipping.py::TestXFail::test_xfail_not_run_xfail_reporting\", \"testing/test_skipping.py::TestXFail::test_xfail_not_run_no_setup_run\", \"testing/test_skipping.py::TestXFail::test_xfail_xpass\", \"testing/test_skipping.py::TestXFail::test_xfail_imperative\", \"testing/test_skipping.py::TestXFail::test_xfail_imperative_in_setup_function\", \"testing/test_skipping.py::TestXFail::test_dynamic_xfail_no_run\", \"testing/test_skipping.py::TestXFail::test_dynamic_xfail_set_during_funcarg_setup\", \"testing/test_skipping.py::TestXFail::test_xfail_raises[TypeError-TypeError-*1\", \"testing/test_skipping.py::TestXFail::test_xfail_raises[(AttributeError,\", \"testing/test_skipping.py::TestXFail::test_xfail_raises[TypeError-IndexError-*1\", \"testing/test_skipping.py::TestXFail::test_strict_sanity\", \"testing/test_skipping.py::TestXFail::test_strict_xfail[True]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail[False]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail_condition[True]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail_condition[False]\", \"testing/test_skipping.py::TestXFail::test_xfail_condition_keyword[True]\", \"testing/test_skipping.py::TestXFail::test_xfail_condition_keyword[False]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail_default_from_file[true]\", \"testing/test_skipping.py::TestXFail::test_strict_xfail_default_from_file[false]\", \"testing/test_skipping.py::TestXFailwithSetupTeardown::test_failing_setup_issue9\", \"testing/test_skipping.py::TestXFailwithSetupTeardown::test_failing_teardown_issue9\", \"testing/test_skipping.py::TestSkip::test_skip_class\", \"testing/test_skipping.py::TestSkip::test_skips_on_false_string\", \"testing/test_skipping.py::TestSkip::test_arg_as_reason\", \"testing/test_skipping.py::TestSkip::test_skip_no_reason\", \"testing/test_skipping.py::TestSkip::test_skip_with_reason\", \"testing/test_skipping.py::TestSkip::test_only_skips_marked_test\", \"testing/test_skipping.py::TestSkip::test_strict_and_skip\", \"testing/test_skipping.py::TestSkipif::test_skipif_conditional\", \"testing/test_skipping.py::TestSkipif::test_skipif_reporting[\\\"hasattr(sys,\", \"testing/test_skipping.py::TestSkipif::test_skipif_reporting[True,\", \"testing/test_skipping.py::TestSkipif::test_skipif_using_platform\", \"testing/test_skipping.py::TestSkipif::test_skipif_reporting_multiple[skipif-SKIP-skipped]\", \"testing/test_skipping.py::TestSkipif::test_skipif_reporting_multiple[xfail-XPASS-xpassed]\", \"testing/test_skipping.py::test_skip_not_report_default\", \"testing/test_skipping.py::test_skipif_class\", \"testing/test_skipping.py::test_skipped_reasons_functional\", \"testing/test_skipping.py::test_skipped_folding\", \"testing/test_skipping.py::test_reportchars\", \"testing/test_skipping.py::test_reportchars_error\", \"testing/test_skipping.py::test_reportchars_all\", \"testing/test_skipping.py::test_reportchars_all_error\", \"testing/test_skipping.py::test_errors_in_xfail_skip_expressions\", \"testing/test_skipping.py::test_xfail_skipif_with_globals\", \"testing/test_skipping.py::test_default_markers\", \"testing/test_skipping.py::test_xfail_test_setup_exception\", \"testing/test_skipping.py::test_imperativeskip_on_xfail_test\", 
\"testing/test_skipping.py::TestBooleanCondition::test_skipif\", \"testing/test_skipping.py::TestBooleanCondition::test_skipif_noreason\", \"testing/test_skipping.py::TestBooleanCondition::test_xfail\", \"testing/test_skipping.py::test_xfail_item\", \"testing/test_skipping.py::test_module_level_skip_error\", \"testing/test_skipping.py::test_module_level_skip_with_allow_module_level\", \"testing/test_skipping.py::test_invalid_skip_keyword_parameter\", \"testing/test_skipping.py::test_mark_xfail_item\", \"testing/test_skipping.py::test_summary_list_after_errors\", \"testing/test_skipping.py::test_relpath_rootdir\"]", + "expected_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_setup", + "pytest_runtest_call" + ] + }, + "test_file_spans": { + "testing/test_skipping.py": [ + "imports", + "TestXFail.test_xfail_raises" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_call" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_call" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "src/_pytest/nodes.py": [ + "imports", + "_splitnode", + "ischildnode", + "impl:10", + "NodeMeta.__call__", + "Node", + "Node.__init__", + "Node.from_parent", + "Node.__repr__", + "Node.warn", + "Node.nodeid", + "Node.__hash__", + "Node.setup", + "Node.teardown", + "Node.listchain", + "Node.add_marker", + "Node.iter_markers", + "Node.iter_markers_with_node", + "Node.get_closest_marker", + "Node.get_closest_marker_13", + "Node.get_closest_marker_14", + "Node.listextrakeywords", + "Node.listnames", + "Node.addfinalizer", + "Node.getparent", + "Node._repr_failure_py", + "Node.repr_failure", + "get_fslocation_from_item", + "Collector.CollectError", + "Collector.collect", + "Collector.repr_failure", + "Collector._prunetraceback", + "FSHookProxy.__init__", + "FSCollector.__init__", + "FSCollector._gethookproxy", + "FSCollector.gethookproxy", + "FSCollector._recurse", + "FSCollector", + "FSCollector.isinitpath", + "FSCollector._collectfile", + "Item", + "Item.__init__", + "Item.runtest", + "Item.add_report_section", + "Item.reportinfo", + "Item.location" + ] + }, + "alternative_spans": {} + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_call", + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_call", + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + }, + "alternative_spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "src/_pytest/nodes.py": [ + "Node.iter_markers" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_call" + ] + } + }, + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_call", + "pytest_runtest_makereport" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "src/_pytest/skipping.py": [ + "pytest_runtest_makereport" + ] + } + } + ] + }, + { + "instance_id": "sphinx-doc__sphinx-7975", + "repo": "sphinx-doc/sphinx", + "base_commit": "4ec6cbe341fd84468c448e20082c778043bbea4b", + 
"problem_statement": "Two sections called Symbols in index\nWhen using index entries with the following leading characters: _@_, _\u00a3_, and _\u2190_ I get two sections called _Symbols_ in the HTML output, the first containing all _@_ entries before \u201dnormal\u201d words and the second containing _\u00a3_ and _\u2190_ entries after the \u201dnormal\u201d words. Both have the same anchor in HTML so the links at the top of the index page contain two _Symbols_ links, one before the letters and one after, but both lead to the first section.\n\n", + "golden_patch": "diff --git a/sphinx/environment/adapters/indexentries.py b/sphinx/environment/adapters/indexentries.py\n--- a/sphinx/environment/adapters/indexentries.py\n+++ b/sphinx/environment/adapters/indexentries.py\n@@ -98,9 +98,8 @@ def keyfunc0(entry: Tuple[str, str]) -> Tuple[bool, str]:\n for subentry in indexentry[1].values():\n subentry[0].sort(key=keyfunc0) # type: ignore\n \n- # sort the index entries; put all symbols at the front, even those\n- # following the letters in ASCII, this is where the chr(127) comes from\n- def keyfunc(entry: Tuple[str, List]) -> Tuple[str, str]:\n+ # sort the index entries\n+ def keyfunc(entry: Tuple[str, List]) -> Tuple[Tuple[int, str], str]:\n key, (void, void, category_key) = entry\n if category_key:\n # using specified category key to sort\n@@ -108,11 +107,16 @@ def keyfunc(entry: Tuple[str, List]) -> Tuple[str, str]:\n lckey = unicodedata.normalize('NFD', key.lower())\n if lckey.startswith('\\N{RIGHT-TO-LEFT MARK}'):\n lckey = lckey[1:]\n+\n if lckey[0:1].isalpha() or lckey.startswith('_'):\n- lckey = chr(127) + lckey\n+ # put non-symbol characters at the folloing group (1)\n+ sortkey = (1, lckey)\n+ else:\n+ # put symbols at the front of the index (0)\n+ sortkey = (0, lckey)\n # ensure a determinstic order *within* letters by also sorting on\n # the entry itself\n- return (lckey, entry[0])\n+ return (sortkey, entry[0])\n newlist = sorted(new.items(), key=keyfunc)\n \n if group_entries:\n", + "test_patch": "diff --git a/tests/test_environment_indexentries.py b/tests/test_environment_indexentries.py\n--- a/tests/test_environment_indexentries.py\n+++ b/tests/test_environment_indexentries.py\n@@ -25,12 +25,14 @@ def test_create_single_index(app):\n \".. index:: \u0451\u043b\u043a\u0430\\n\"\n \".. index:: \u200f\u05ea\u05d9\u05e8\u05d1\u05e2\u200e\\n\"\n \".. index:: 9-symbol\\n\"\n- \".. index:: &-symbol\\n\")\n+ \".. index:: &-symbol\\n\"\n+ \".. 
index:: \u00a3100\\n\")\n restructuredtext.parse(app, text)\n index = IndexEntries(app.env).create_index(app.builder)\n assert len(index) == 6\n assert index[0] == ('Symbols', [('&-symbol', [[('', '#index-9')], [], None]),\n- ('9-symbol', [[('', '#index-8')], [], None])])\n+ ('9-symbol', [[('', '#index-8')], [], None]),\n+ ('\u00a3100', [[('', '#index-10')], [], None])])\n assert index[1] == ('D', [('docutils', [[('', '#index-0')], [], None])])\n assert index[2] == ('P', [('pip', [[], [('install', [('', '#index-2')]),\n ('upgrade', [('', '#index-3')])], None]),\n", + "fail_to_pass": "[\"tests/test_environment_indexentries.py::test_create_single_index\"]", + "pass_to_pass": "[\"tests/test_environment_indexentries.py::test_create_pair_index\", \"tests/test_environment_indexentries.py::test_create_triple_index\", \"tests/test_environment_indexentries.py::test_create_see_index\", \"tests/test_environment_indexentries.py::test_create_seealso_index\", \"tests/test_environment_indexentries.py::test_create_main_index\", \"tests/test_environment_indexentries.py::test_create_index_with_name\", \"tests/test_environment_indexentries.py::test_create_index_by_key\"]", + "expected_spans": { + "sphinx/environment/adapters/indexentries.py": [ + "IndexEntries.create_index" + ] + }, + "test_file_spans": { + "tests/test_environment_indexentries.py": [ + "test_create_single_index" + ] + }, + "resolved_by": [ + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sphinx/environment/adapters/indexentries.py": [ + "IndexEntries.create_index" + ] + }, + "alternative_spans": { + "sphinx/environment/adapters/indexentries.py": [ + "IndexEntries.create_index" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sphinx/environment/adapters/indexentries.py": [ + "IndexEntries.create_index" + ] + }, + "alternative_spans": { + "sphinx/environment/adapters/indexentries.py": [ + "IndexEntries.create_index" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sphinx/environment/adapters/indexentries.py": [ + "IndexEntries.create_index" + ] + }, + "alternative_spans": { + "sphinx/environment/adapters/indexentries.py": [ + "IndexEntries.create_index" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sphinx/environment/adapters/indexentries.py": [ + "IndexEntries.create_index" + ] + }, + "alternative_spans": { + "sphinx/environment/adapters/indexentries.py": [ + "IndexEntries.create_index" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13220", + "repo": "django/django", + "base_commit": "16218c20606d8cd89c5393970c83da04598a3e04", + "problem_statement": "Allow ValidationErrors to equal each other when created identically\nDescription\n\t \n\t\t(last modified by kamni)\n\t \nCurrently ValidationErrors (django.core.exceptions.ValidationError) that have identical messages don't equal each other, which is counter-intuitive, and can make certain kinds of testing more complicated. Please add an __eq__ method that allows two ValidationErrors to be compared. \nIdeally, this would be more than just a simple self.messages == other.messages. 
It would be most helpful if the comparison were independent of the order in which errors were raised in a field or in non_field_errors.\n", + "golden_patch": "diff --git a/django/core/exceptions.py b/django/core/exceptions.py\n--- a/django/core/exceptions.py\n+++ b/django/core/exceptions.py\n@@ -1,6 +1,9 @@\n \"\"\"\n Global Django exception and warning classes.\n \"\"\"\n+import operator\n+\n+from django.utils.hashable import make_hashable\n \n \n class FieldDoesNotExist(Exception):\n@@ -182,6 +185,23 @@ def __str__(self):\n def __repr__(self):\n return 'ValidationError(%s)' % self\n \n+ def __eq__(self, other):\n+ if not isinstance(other, ValidationError):\n+ return NotImplemented\n+ return hash(self) == hash(other)\n+\n+ def __hash__(self):\n+ # Ignore params and messages ordering.\n+ if hasattr(self, 'message'):\n+ return hash((\n+ self.message,\n+ self.code,\n+ tuple(sorted(make_hashable(self.params))) if self.params else None,\n+ ))\n+ if hasattr(self, 'error_dict'):\n+ return hash(tuple(sorted(make_hashable(self.error_dict))))\n+ return hash(tuple(sorted(self.error_list, key=operator.attrgetter('message'))))\n+\n \n class EmptyResultSet(Exception):\n \"\"\"A database query predicate is impossible.\"\"\"\n", + "test_patch": "diff --git a/tests/test_exceptions/test_validation_error.py b/tests/test_exceptions/test_validation_error.py\n--- a/tests/test_exceptions/test_validation_error.py\n+++ b/tests/test_exceptions/test_validation_error.py\n@@ -1,4 +1,5 @@\n import unittest\n+from unittest import mock\n \n from django.core.exceptions import ValidationError\n \n@@ -14,3 +15,271 @@ def test_messages_concatenates_error_dict_values(self):\n message_dict['field2'] = ['E3', 'E4']\n exception = ValidationError(message_dict)\n self.assertEqual(sorted(exception.messages), ['E1', 'E2', 'E3', 'E4'])\n+\n+ def test_eq(self):\n+ error1 = ValidationError('message')\n+ error2 = ValidationError('message', code='my_code1')\n+ error3 = ValidationError('message', code='my_code2')\n+ error4 = ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm1': 'val1', 'parm2': 'val2'},\n+ )\n+ error5 = ValidationError({'field1': 'message', 'field2': 'other'})\n+ error6 = ValidationError({'field1': 'message'})\n+ error7 = ValidationError([\n+ ValidationError({'field1': 'field error', 'field2': 'other'}),\n+ 'message',\n+ ])\n+\n+ self.assertEqual(error1, ValidationError('message'))\n+ self.assertNotEqual(error1, ValidationError('message2'))\n+ self.assertNotEqual(error1, error2)\n+ self.assertNotEqual(error1, error4)\n+ self.assertNotEqual(error1, error5)\n+ self.assertNotEqual(error1, error6)\n+ self.assertNotEqual(error1, error7)\n+ self.assertEqual(error1, mock.ANY)\n+ self.assertEqual(error2, ValidationError('message', code='my_code1'))\n+ self.assertNotEqual(error2, ValidationError('other', code='my_code1'))\n+ self.assertNotEqual(error2, error3)\n+ self.assertNotEqual(error2, error4)\n+ self.assertNotEqual(error2, error5)\n+ self.assertNotEqual(error2, error6)\n+ self.assertNotEqual(error2, error7)\n+\n+ self.assertEqual(error4, ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm1': 'val1', 'parm2': 'val2'},\n+ ))\n+ self.assertNotEqual(error4, ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code2',\n+ params={'parm1': 'val1', 'parm2': 'val2'},\n+ ))\n+ self.assertNotEqual(error4, ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm2': 'val2'},\n+ ))\n+ self.assertNotEqual(error4, ValidationError(\n+ 
'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm2': 'val1', 'parm1': 'val2'},\n+ ))\n+ self.assertNotEqual(error4, ValidationError(\n+ 'error val1 val2',\n+ code='my_code1',\n+ ))\n+ # params ordering is ignored.\n+ self.assertEqual(error4, ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm2': 'val2', 'parm1': 'val1'},\n+ ))\n+\n+ self.assertEqual(\n+ error5,\n+ ValidationError({'field1': 'message', 'field2': 'other'}),\n+ )\n+ self.assertNotEqual(\n+ error5,\n+ ValidationError({'field1': 'message', 'field2': 'other2'}),\n+ )\n+ self.assertNotEqual(\n+ error5,\n+ ValidationError({'field1': 'message', 'field3': 'other'}),\n+ )\n+ self.assertNotEqual(error5, error6)\n+ # fields ordering is ignored.\n+ self.assertEqual(\n+ error5,\n+ ValidationError({'field2': 'other', 'field1': 'message'}),\n+ )\n+\n+ self.assertNotEqual(error7, ValidationError(error7.error_list[1:]))\n+ self.assertNotEqual(\n+ ValidationError(['message']),\n+ ValidationError([ValidationError('message', code='my_code')]),\n+ )\n+ # messages ordering is ignored.\n+ self.assertEqual(\n+ error7,\n+ ValidationError(list(reversed(error7.error_list))),\n+ )\n+\n+ self.assertNotEqual(error4, ValidationError([error4]))\n+ self.assertNotEqual(ValidationError([error4]), error4)\n+ self.assertNotEqual(error4, ValidationError({'field1': error4}))\n+ self.assertNotEqual(ValidationError({'field1': error4}), error4)\n+\n+ def test_eq_nested(self):\n+ error_dict = {\n+ 'field1': ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code',\n+ params={'parm1': 'val1', 'parm2': 'val2'},\n+ ),\n+ 'field2': 'other',\n+ }\n+ error = ValidationError(error_dict)\n+ self.assertEqual(error, ValidationError(dict(error_dict)))\n+ self.assertEqual(error, ValidationError({\n+ 'field1': ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code',\n+ params={'parm2': 'val2', 'parm1': 'val1'},\n+ ),\n+ 'field2': 'other',\n+ }))\n+ self.assertNotEqual(error, ValidationError(\n+ {**error_dict, 'field2': 'message'},\n+ ))\n+ self.assertNotEqual(error, ValidationError({\n+ 'field1': ValidationError(\n+ 'error %(parm1)s val2',\n+ code='my_code',\n+ params={'parm1': 'val1'},\n+ ),\n+ 'field2': 'other',\n+ }))\n+\n+ def test_hash(self):\n+ error1 = ValidationError('message')\n+ error2 = ValidationError('message', code='my_code1')\n+ error3 = ValidationError('message', code='my_code2')\n+ error4 = ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm1': 'val1', 'parm2': 'val2'},\n+ )\n+ error5 = ValidationError({'field1': 'message', 'field2': 'other'})\n+ error6 = ValidationError({'field1': 'message'})\n+ error7 = ValidationError([\n+ ValidationError({'field1': 'field error', 'field2': 'other'}),\n+ 'message',\n+ ])\n+\n+ self.assertEqual(hash(error1), hash(ValidationError('message')))\n+ self.assertNotEqual(hash(error1), hash(ValidationError('message2')))\n+ self.assertNotEqual(hash(error1), hash(error2))\n+ self.assertNotEqual(hash(error1), hash(error4))\n+ self.assertNotEqual(hash(error1), hash(error5))\n+ self.assertNotEqual(hash(error1), hash(error6))\n+ self.assertNotEqual(hash(error1), hash(error7))\n+ self.assertEqual(\n+ hash(error2),\n+ hash(ValidationError('message', code='my_code1')),\n+ )\n+ self.assertNotEqual(\n+ hash(error2),\n+ hash(ValidationError('other', code='my_code1')),\n+ )\n+ self.assertNotEqual(hash(error2), hash(error3))\n+ self.assertNotEqual(hash(error2), hash(error4))\n+ self.assertNotEqual(hash(error2), hash(error5))\n+ 
self.assertNotEqual(hash(error2), hash(error6))\n+ self.assertNotEqual(hash(error2), hash(error7))\n+\n+ self.assertEqual(hash(error4), hash(ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm1': 'val1', 'parm2': 'val2'},\n+ )))\n+ self.assertNotEqual(hash(error4), hash(ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code2',\n+ params={'parm1': 'val1', 'parm2': 'val2'},\n+ )))\n+ self.assertNotEqual(hash(error4), hash(ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm2': 'val2'},\n+ )))\n+ self.assertNotEqual(hash(error4), hash(ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm2': 'val1', 'parm1': 'val2'},\n+ )))\n+ self.assertNotEqual(hash(error4), hash(ValidationError(\n+ 'error val1 val2',\n+ code='my_code1',\n+ )))\n+ # params ordering is ignored.\n+ self.assertEqual(hash(error4), hash(ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code1',\n+ params={'parm2': 'val2', 'parm1': 'val1'},\n+ )))\n+\n+ self.assertEqual(\n+ hash(error5),\n+ hash(ValidationError({'field1': 'message', 'field2': 'other'})),\n+ )\n+ self.assertNotEqual(\n+ hash(error5),\n+ hash(ValidationError({'field1': 'message', 'field2': 'other2'})),\n+ )\n+ self.assertNotEqual(\n+ hash(error5),\n+ hash(ValidationError({'field1': 'message', 'field3': 'other'})),\n+ )\n+ self.assertNotEqual(error5, error6)\n+ # fields ordering is ignored.\n+ self.assertEqual(\n+ hash(error5),\n+ hash(ValidationError({'field2': 'other', 'field1': 'message'})),\n+ )\n+\n+ self.assertNotEqual(\n+ hash(error7),\n+ hash(ValidationError(error7.error_list[1:])),\n+ )\n+ self.assertNotEqual(\n+ hash(ValidationError(['message'])),\n+ hash(ValidationError([ValidationError('message', code='my_code')])),\n+ )\n+ # messages ordering is ignored.\n+ self.assertEqual(\n+ hash(error7),\n+ hash(ValidationError(list(reversed(error7.error_list)))),\n+ )\n+\n+ self.assertNotEqual(hash(error4), hash(ValidationError([error4])))\n+ self.assertNotEqual(hash(ValidationError([error4])), hash(error4))\n+ self.assertNotEqual(\n+ hash(error4),\n+ hash(ValidationError({'field1': error4})),\n+ )\n+\n+ def test_hash_nested(self):\n+ error_dict = {\n+ 'field1': ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code',\n+ params={'parm2': 'val2', 'parm1': 'val1'},\n+ ),\n+ 'field2': 'other',\n+ }\n+ error = ValidationError(error_dict)\n+ self.assertEqual(hash(error), hash(ValidationError(dict(error_dict))))\n+ self.assertEqual(hash(error), hash(ValidationError({\n+ 'field1': ValidationError(\n+ 'error %(parm1)s %(parm2)s',\n+ code='my_code',\n+ params={'parm1': 'val1', 'parm2': 'val2'},\n+ ),\n+ 'field2': 'other',\n+ })))\n+ self.assertNotEqual(hash(error), hash(ValidationError(\n+ {**error_dict, 'field2': 'message'},\n+ )))\n+ self.assertNotEqual(hash(error), hash(ValidationError({\n+ 'field1': ValidationError(\n+ 'error %(parm1)s val2',\n+ code='my_code',\n+ params={'parm1': 'val1'},\n+ ),\n+ 'field2': 'other',\n+ })))\n", + "fail_to_pass": "[\"test_eq (test_exceptions.test_validation_error.TestValidationError)\", \"test_eq_nested (test_exceptions.test_validation_error.TestValidationError)\", \"test_hash (test_exceptions.test_validation_error.TestValidationError)\", \"test_hash_nested (test_exceptions.test_validation_error.TestValidationError)\"]", + "pass_to_pass": "[\"test_messages_concatenates_error_dict_values (test_exceptions.test_validation_error.TestValidationError)\"]", + "expected_spans": { + "django/core/exceptions.py": [ + 
"docstring", + "ValidationError" + ] + }, + "test_file_spans": { + "tests/test_exceptions/test_validation_error.py": [ + "imports" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "pydata__xarray-4248", + "repo": "pydata/xarray", + "base_commit": "98dc1f4ea18738492e074e9e51ddfed5cd30ab94", + "problem_statement": "Feature request: show units in dataset overview\nHere's a hypothetical dataset:\r\n\r\n```\r\n\r\nDimensions: (time: 3, x: 988, y: 822)\r\nCoordinates:\r\n * x (x) float64 ...\r\n * y (y) float64 ...\r\n * time (time) datetime64[ns] ...\r\nData variables:\r\n rainfall (time, y, x) float32 ...\r\n max_temp (time, y, x) float32 ...\r\n```\r\n\r\nIt would be really nice if the units of the coordinates and of the data variables were shown in the `Dataset` repr, for example as:\r\n\r\n```\r\n\r\nDimensions: (time: 3, x: 988, y: 822)\r\nCoordinates:\r\n * x, in metres (x) float64 ...\r\n * y, in metres (y) float64 ...\r\n * time (time) datetime64[ns] ...\r\nData variables:\r\n rainfall, in mm (time, y, x) float32 ...\r\n max_temp, in deg C (time, y, x) float32 ...\r\n```\n", + "golden_patch": "diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py\n--- a/xarray/core/formatting.py\n+++ b/xarray/core/formatting.py\n@@ -261,6 +261,8 @@ def inline_variable_array_repr(var, max_width):\n return inline_dask_repr(var.data)\n elif isinstance(var._data, sparse_array_type):\n return inline_sparse_repr(var.data)\n+ elif hasattr(var._data, \"_repr_inline_\"):\n+ return var._data._repr_inline_(max_width)\n elif hasattr(var._data, \"__array_function__\"):\n return maybe_truncate(repr(var._data).replace(\"\\n\", \" \"), max_width)\n else:\n", + "test_patch": "diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py\n--- a/xarray/tests/test_formatting.py\n+++ b/xarray/tests/test_formatting.py\n@@ -7,6 +7,7 @@\n \n import xarray as xr\n from xarray.core import formatting\n+from xarray.core.npcompat import IS_NEP18_ACTIVE\n \n from . 
import raises_regex\n \n@@ -391,6 +392,44 @@ def test_array_repr(self):\n assert actual == expected\n \n \n+@pytest.mark.skipif(not IS_NEP18_ACTIVE, reason=\"requires __array_function__\")\n+def test_inline_variable_array_repr_custom_repr():\n+ class CustomArray:\n+ def __init__(self, value, attr):\n+ self.value = value\n+ self.attr = attr\n+\n+ def _repr_inline_(self, width):\n+ formatted = f\"({self.attr}) {self.value}\"\n+ if len(formatted) > width:\n+ formatted = f\"({self.attr}) ...\"\n+\n+ return formatted\n+\n+ def __array_function__(self, *args, **kwargs):\n+ return NotImplemented\n+\n+ @property\n+ def shape(self):\n+ return self.value.shape\n+\n+ @property\n+ def dtype(self):\n+ return self.value.dtype\n+\n+ @property\n+ def ndim(self):\n+ return self.value.ndim\n+\n+ value = CustomArray(np.array([20, 40]), \"m\")\n+ variable = xr.Variable(\"x\", value)\n+\n+ max_width = 10\n+ actual = formatting.inline_variable_array_repr(variable, max_width=10)\n+\n+ assert actual == value._repr_inline_(max_width)\n+\n+\n def test_set_numpy_options():\n original_options = np.get_printoptions()\n with formatting.set_numpy_options(threshold=10):\n", + "fail_to_pass": "[\"xarray/tests/test_formatting.py::test_inline_variable_array_repr_custom_repr\"]", + "pass_to_pass": "[\"xarray/tests/test_formatting.py::TestFormatting::test_get_indexer_at_least_n_items\", \"xarray/tests/test_formatting.py::TestFormatting::test_first_n_items\", \"xarray/tests/test_formatting.py::TestFormatting::test_last_n_items\", \"xarray/tests/test_formatting.py::TestFormatting::test_last_item\", \"xarray/tests/test_formatting.py::TestFormatting::test_format_item\", \"xarray/tests/test_formatting.py::TestFormatting::test_format_items\", \"xarray/tests/test_formatting.py::TestFormatting::test_format_array_flat\", \"xarray/tests/test_formatting.py::TestFormatting::test_pretty_print\", \"xarray/tests/test_formatting.py::TestFormatting::test_maybe_truncate\", \"xarray/tests/test_formatting.py::TestFormatting::test_format_timestamp_out_of_bounds\", \"xarray/tests/test_formatting.py::TestFormatting::test_attribute_repr\", \"xarray/tests/test_formatting.py::TestFormatting::test_diff_array_repr\", \"xarray/tests/test_formatting.py::TestFormatting::test_diff_attrs_repr_with_array\", \"xarray/tests/test_formatting.py::TestFormatting::test_diff_dataset_repr\", \"xarray/tests/test_formatting.py::TestFormatting::test_array_repr\", \"xarray/tests/test_formatting.py::test_set_numpy_options\", \"xarray/tests/test_formatting.py::test_short_numpy_repr\", \"xarray/tests/test_formatting.py::test_large_array_repr_length\"]", + "expected_spans": { + "xarray/core/formatting.py": [ + "inline_variable_array_repr" + ] + }, + "test_file_spans": { + "xarray/tests/test_formatting.py": [ + "imports", + "test_set_numpy_options" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13230", + "repo": "django/django", + "base_commit": "184a6eebb0ef56d5f1b1315a8e666830e37f3f81", + "problem_statement": "Add support for item_comments to syndication framework\nDescription\n\t\nAdd comments argument to feed.add_item() in syndication.views so that item_comments can be defined directly without having to take the detour via item_extra_kwargs .\nAdditionally, comments is already explicitly mentioned in the feedparser, but not implemented in the view.\n", + "golden_patch": "diff --git a/django/contrib/syndication/views.py b/django/contrib/syndication/views.py\n--- a/django/contrib/syndication/views.py\n+++ 
b/django/contrib/syndication/views.py\n@@ -212,6 +212,7 @@ def get_feed(self, obj, request):\n author_name=author_name,\n author_email=author_email,\n author_link=author_link,\n+ comments=self._get_dynamic_attr('item_comments', item),\n categories=self._get_dynamic_attr('item_categories', item),\n item_copyright=self._get_dynamic_attr('item_copyright', item),\n **self.item_extra_kwargs(item)\n", + "test_patch": "diff --git a/tests/syndication_tests/feeds.py b/tests/syndication_tests/feeds.py\n--- a/tests/syndication_tests/feeds.py\n+++ b/tests/syndication_tests/feeds.py\n@@ -29,6 +29,9 @@ def item_pubdate(self, item):\n def item_updateddate(self, item):\n return item.updated\n \n+ def item_comments(self, item):\n+ return \"%scomments\" % item.get_absolute_url()\n+\n item_author_name = 'Sally Smith'\n item_author_email = 'test@example.com'\n item_author_link = 'http://www.example.com/'\ndiff --git a/tests/syndication_tests/tests.py b/tests/syndication_tests/tests.py\n--- a/tests/syndication_tests/tests.py\n+++ b/tests/syndication_tests/tests.py\n@@ -136,10 +136,20 @@ def test_rss2_feed(self):\n 'guid': 'http://example.com/blog/1/',\n 'pubDate': pub_date,\n 'author': 'test@example.com (Sally Smith)',\n+ 'comments': '/blog/1/comments',\n })\n self.assertCategories(items[0], ['python', 'testing'])\n for item in items:\n- self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])\n+ self.assertChildNodes(item, [\n+ 'title',\n+ 'link',\n+ 'description',\n+ 'guid',\n+ 'category',\n+ 'pubDate',\n+ 'author',\n+ 'comments',\n+ ])\n # Assert that does not have any 'isPermaLink' attribute\n self.assertIsNone(item.getElementsByTagName(\n 'guid')[0].attributes.get('isPermaLink'))\n", + "fail_to_pass": "[\"test_rss2_feed (syndication_tests.tests.SyndicationFeedTest)\"]", + "pass_to_pass": "[\"test_add_domain (syndication_tests.tests.SyndicationFeedTest)\", \"test_atom_feed (syndication_tests.tests.SyndicationFeedTest)\", \"test_atom_feed_published_and_updated_elements (syndication_tests.tests.SyndicationFeedTest)\", \"test_atom_multiple_enclosures (syndication_tests.tests.SyndicationFeedTest)\", \"test_atom_single_enclosure (syndication_tests.tests.SyndicationFeedTest)\", \"test_aware_datetime_conversion (syndication_tests.tests.SyndicationFeedTest)\", \"test_custom_feed_generator (syndication_tests.tests.SyndicationFeedTest)\", \"test_feed_generator_language_attribute (syndication_tests.tests.SyndicationFeedTest)\", \"test_feed_last_modified_time (syndication_tests.tests.SyndicationFeedTest)\", \"test_feed_last_modified_time_naive_date (syndication_tests.tests.SyndicationFeedTest)\", \"test_feed_url (syndication_tests.tests.SyndicationFeedTest)\", \"test_item_link_error (syndication_tests.tests.SyndicationFeedTest)\", \"test_latest_post_date (syndication_tests.tests.SyndicationFeedTest)\", \"test_naive_datetime_conversion (syndication_tests.tests.SyndicationFeedTest)\", \"test_rss091_feed (syndication_tests.tests.SyndicationFeedTest)\", \"test_rss2_feed_guid_permalink_false (syndication_tests.tests.SyndicationFeedTest)\", \"test_rss2_feed_guid_permalink_true (syndication_tests.tests.SyndicationFeedTest)\", \"test_rss2_multiple_enclosures (syndication_tests.tests.SyndicationFeedTest)\", \"test_rss2_single_enclosure (syndication_tests.tests.SyndicationFeedTest)\", \"test_secure_urls (syndication_tests.tests.SyndicationFeedTest)\", \"test_template_context_feed (syndication_tests.tests.SyndicationFeedTest)\", \"test_template_feed 
(syndication_tests.tests.SyndicationFeedTest)\", \"test_title_escaping (syndication_tests.tests.SyndicationFeedTest)\"]", + "expected_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "test_file_spans": { + "tests/syndication_tests/feeds.py": [ + "TestRss2Feed:22" + ], + "tests/syndication_tests/tests.py": [ + "SyndicationFeedTest.test_rss2_feed" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.item_link", + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.item_link", + "Feed.get_feed" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ], + "tests/syndication_tests/tests.py": [ + "SyndicationFeedTest.test_rss2_feed_guid_permalink_true" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ], + "django/utils/feedgenerator.py": [ + "SyndicationFeed.add_item", + "Rss201rev2Feed.add_item_elements" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "imports", + "add_domain", + "Feed.__call__", + "Feed.item_title", + "Feed.item_link", + "Feed.item_enclosures", + "Feed._get_dynamic_attr", + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "imports", + "add_domain", + "Feed.__call__", + "Feed.item_title", + "Feed.item_link", + "Feed.item_enclosures", + "Feed._get_dynamic_attr", + "Feed.get_feed" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/conf/global_settings.py": [ + "impl:197" + ], + "django/contrib/syndication/views.py": [ + "Feed._get_dynamic_attr", + "Feed.get_feed" + ], + "tests/syndication_tests/models.py": [ + "Entry", + "Entry.Meta", + "Entry.__str__", + "Entry.get_absolute_url" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + 
"Feed._get_dynamic_attr", + "Feed.get_feed" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_object", + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_object", + "Feed.get_feed" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed", + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed", + "Feed.get_feed" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_object", + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_object", + "Feed.get_feed" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.item_extra_kwargs" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.item_extra_kwargs" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_object", + "Feed.get_feed" + ], + "django/utils/feedgenerator.py": [ + "SyndicationFeed.add_item" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_object", + "Feed.get_feed" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + 
"django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.item_extra_kwargs", + "Feed.get_object" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.item_extra_kwargs", + "Feed.get_object" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + }, + "alternative_spans": { + "django/contrib/syndication/views.py": [ + "Feed.get_feed" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240402_sweagent_gpt4", + "spans": { + "django/contrib/syndication/views.py": [ + "Feed.item_extra_kwargs" + ] + } + }, + { + "run_name": "20240509_amazon-q-developer-agent-20240430-dev", + "spans": { + "django/contrib/syndication/views.py": [ + "Feed.item_extra_kwargs", + "Feed.get_object" + ] + } + } + ] + }, + { + "instance_id": "django__django-13265", + "repo": "django/django", + "base_commit": "b2b0711b555fa292751763c2df4fe577c396f265", + "problem_statement": "AlterOrderWithRespectTo() with ForeignKey crash when _order is included in Index().\nDescription\n\t\n\tclass Meta:\n\t\tdb_table = 'look_image'\n\t\torder_with_respect_to = 'look'\n\t\tindexes = [\n\t\t\tmodels.Index(fields=['look', '_order']),\n\t\t\tmodels.Index(fields=['created_at']),\n\t\t\tmodels.Index(fields=['updated_at']),\n\t\t]\nmigrations.CreateModel(\n\t\t\tname='LookImage',\n\t\t\tfields=[\n\t\t\t\t('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n\t\t\t\t('look', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='posts.Look', verbose_name='LOOK')),\n\t\t\t\t('image_url', models.URLField(blank=True, max_length=10000, null=True)),\n\t\t\t\t('image', models.ImageField(max_length=2000, upload_to='')),\n\t\t\t\t('deleted', models.DateTimeField(editable=False, null=True)),\n\t\t\t\t('created_at', models.DateTimeField(auto_now_add=True)),\n\t\t\t\t('updated_at', models.DateTimeField(auto_now=True)),\n\t\t\t],\n\t\t),\n\t\tmigrations.AddIndex(\n\t\t\tmodel_name='lookimage',\n\t\t\tindex=models.Index(fields=['look', '_order'], name='look_image_look_id_eaff30_idx'),\n\t\t),\n\t\tmigrations.AddIndex(\n\t\t\tmodel_name='lookimage',\n\t\t\tindex=models.Index(fields=['created_at'], name='look_image_created_f746cf_idx'),\n\t\t),\n\t\tmigrations.AddIndex(\n\t\t\tmodel_name='lookimage',\n\t\t\tindex=models.Index(fields=['updated_at'], name='look_image_updated_aceaf9_idx'),\n\t\t),\n\t\tmigrations.AlterOrderWithRespectTo(\n\t\t\tname='lookimage',\n\t\t\torder_with_respect_to='look',\n\t\t),\nI added orders_with_respect_to in new model class's Meta class and also made index for '_order' field by combining with other field. 
And a new migration file based on the model looks like the code above.\nThe problem is operation AlterOrderWithRespectTo after AddIndex of '_order' raising error because '_order' field had not been created yet.\nIt seems to be AlterOrderWithRespectTo has to proceed before AddIndex of '_order'.\n", + "golden_patch": "diff --git a/django/db/migrations/autodetector.py b/django/db/migrations/autodetector.py\n--- a/django/db/migrations/autodetector.py\n+++ b/django/db/migrations/autodetector.py\n@@ -182,12 +182,12 @@ def _detect_changes(self, convert_apps=None, graph=None):\n self.generate_removed_fields()\n self.generate_added_fields()\n self.generate_altered_fields()\n+ self.generate_altered_order_with_respect_to()\n self.generate_altered_unique_together()\n self.generate_altered_index_together()\n self.generate_added_indexes()\n self.generate_added_constraints()\n self.generate_altered_db_table()\n- self.generate_altered_order_with_respect_to()\n \n self._sort_migrations()\n self._build_migration_list(graph)\n@@ -613,6 +613,18 @@ def generate_created_models(self):\n dependencies=list(set(dependencies)),\n )\n # Generate other opns\n+ if order_with_respect_to:\n+ self.add_operation(\n+ app_label,\n+ operations.AlterOrderWithRespectTo(\n+ name=model_name,\n+ order_with_respect_to=order_with_respect_to,\n+ ),\n+ dependencies=[\n+ (app_label, model_name, order_with_respect_to, True),\n+ (app_label, model_name, None, True),\n+ ]\n+ )\n related_dependencies = [\n (app_label, model_name, name, True)\n for name in sorted(related_fields)\n@@ -654,19 +666,6 @@ def generate_created_models(self):\n ),\n dependencies=related_dependencies\n )\n- if order_with_respect_to:\n- self.add_operation(\n- app_label,\n- operations.AlterOrderWithRespectTo(\n- name=model_name,\n- order_with_respect_to=order_with_respect_to,\n- ),\n- dependencies=[\n- (app_label, model_name, order_with_respect_to, True),\n- (app_label, model_name, None, True),\n- ]\n- )\n-\n # Fix relationships if the model changed from a proxy model to a\n # concrete model.\n if (app_label, model_name) in self.old_proxy_keys:\n", + "test_patch": "diff --git a/tests/migrations/test_autodetector.py b/tests/migrations/test_autodetector.py\n--- a/tests/migrations/test_autodetector.py\n+++ b/tests/migrations/test_autodetector.py\n@@ -2151,6 +2151,115 @@ def test_add_model_order_with_respect_to(self):\n )\n self.assertNotIn(\"_order\", [name for name, field in changes['testapp'][0].operations[0].fields])\n \n+ def test_add_model_order_with_respect_to_index_foo_together(self):\n+ changes = self.get_changes([], [\n+ self.book,\n+ ModelState('testapp', 'Author', [\n+ ('id', models.AutoField(primary_key=True)),\n+ ('name', models.CharField(max_length=200)),\n+ ('book', models.ForeignKey('otherapp.Book', models.CASCADE)),\n+ ], options={\n+ 'order_with_respect_to': 'book',\n+ 'index_together': {('name', '_order')},\n+ 'unique_together': {('id', '_order')},\n+ }),\n+ ])\n+ self.assertNumberMigrations(changes, 'testapp', 1)\n+ self.assertOperationTypes(changes, 'testapp', 0, ['CreateModel'])\n+ self.assertOperationAttributes(\n+ changes,\n+ 'testapp',\n+ 0,\n+ 0,\n+ name='Author',\n+ options={\n+ 'order_with_respect_to': 'book',\n+ 'index_together': {('name', '_order')},\n+ 'unique_together': {('id', '_order')},\n+ },\n+ )\n+\n+ def test_add_model_order_with_respect_to_index_constraint(self):\n+ tests = [\n+ (\n+ 'AddIndex',\n+ {'indexes': [\n+ models.Index(fields=['_order'], name='book_order_idx'),\n+ ]},\n+ ),\n+ (\n+ 'AddConstraint',\n+ {'constraints': [\n+ 
models.CheckConstraint(\n+ check=models.Q(_order__gt=1),\n+ name='book_order_gt_1',\n+ ),\n+ ]},\n+ ),\n+ ]\n+ for operation, extra_option in tests:\n+ with self.subTest(operation=operation):\n+ after = ModelState('testapp', 'Author', [\n+ ('id', models.AutoField(primary_key=True)),\n+ ('name', models.CharField(max_length=200)),\n+ ('book', models.ForeignKey('otherapp.Book', models.CASCADE)),\n+ ], options={\n+ 'order_with_respect_to': 'book',\n+ **extra_option,\n+ })\n+ changes = self.get_changes([], [self.book, after])\n+ self.assertNumberMigrations(changes, 'testapp', 1)\n+ self.assertOperationTypes(changes, 'testapp', 0, [\n+ 'CreateModel', operation,\n+ ])\n+ self.assertOperationAttributes(\n+ changes,\n+ 'testapp',\n+ 0,\n+ 0,\n+ name='Author',\n+ options={'order_with_respect_to': 'book'},\n+ )\n+\n+ def test_set_alter_order_with_respect_to_index_constraint_foo_together(self):\n+ tests = [\n+ (\n+ 'AddIndex',\n+ {'indexes': [\n+ models.Index(fields=['_order'], name='book_order_idx'),\n+ ]},\n+ ),\n+ (\n+ 'AddConstraint',\n+ {'constraints': [\n+ models.CheckConstraint(\n+ check=models.Q(_order__gt=1),\n+ name='book_order_gt_1',\n+ ),\n+ ]},\n+ ),\n+ ('AlterIndexTogether', {'index_together': {('name', '_order')}}),\n+ ('AlterUniqueTogether', {'unique_together': {('id', '_order')}}),\n+ ]\n+ for operation, extra_option in tests:\n+ with self.subTest(operation=operation):\n+ after = ModelState('testapp', 'Author', [\n+ ('id', models.AutoField(primary_key=True)),\n+ ('name', models.CharField(max_length=200)),\n+ ('book', models.ForeignKey('otherapp.Book', models.CASCADE)),\n+ ], options={\n+ 'order_with_respect_to': 'book',\n+ **extra_option,\n+ })\n+ changes = self.get_changes(\n+ [self.book, self.author_with_book],\n+ [self.book, after],\n+ )\n+ self.assertNumberMigrations(changes, 'testapp', 1)\n+ self.assertOperationTypes(changes, 'testapp', 0, [\n+ 'AlterOrderWithRespectTo', operation,\n+ ])\n+\n def test_alter_model_managers(self):\n \"\"\"\n Changing the model managers adds a new operation.\n", + "fail_to_pass": "[\"test_add_model_order_with_respect_to_index_constraint (migrations.test_autodetector.AutodetectorTests)\", \"test_add_model_order_with_respect_to_index_foo_together (migrations.test_autodetector.AutodetectorTests)\", \"test_set_alter_order_with_respect_to_index_constraint_foo_together (migrations.test_autodetector.AutodetectorTests)\", \"test_supports_functools_partial (migrations.test_autodetector.AutodetectorTests)\"]", + "pass_to_pass": "[\"test_auto (migrations.test_autodetector.MigrationSuggestNameTests)\", \"test_none_name (migrations.test_autodetector.MigrationSuggestNameTests)\", \"test_none_name_with_initial_true (migrations.test_autodetector.MigrationSuggestNameTests)\", \"test_single_operation (migrations.test_autodetector.MigrationSuggestNameTests)\", \"test_two_create_models (migrations.test_autodetector.MigrationSuggestNameTests)\", \"test_two_create_models_with_initial_true (migrations.test_autodetector.MigrationSuggestNameTests)\", \"test_add_alter_order_with_respect_to (migrations.test_autodetector.AutodetectorTests)\", \"test_add_blank_textfield_and_charfield (migrations.test_autodetector.AutodetectorTests)\", \"Test change detection of new constraints.\", \"test_add_date_fields_with_auto_now_add_asking_for_default (migrations.test_autodetector.AutodetectorTests)\", \"test_add_date_fields_with_auto_now_add_not_asking_for_null_addition (migrations.test_autodetector.AutodetectorTests)\", \"test_add_date_fields_with_auto_now_not_asking_for_default 
(migrations.test_autodetector.AutodetectorTests)\", \"Tests autodetection of new fields.\", \"test_add_field_and_foo_together (migrations.test_autodetector.AutodetectorTests)\", \"#22030 - Adding a field with a default should work.\", \"Tests index/unique_together detection.\", \"Test change detection of new indexes.\", \"#22435 - Adding a ManyToManyField should not prompt for a default.\", \"test_add_model_order_with_respect_to (migrations.test_autodetector.AutodetectorTests)\", \"test_add_model_with_field_removed_from_base_model (migrations.test_autodetector.AutodetectorTests)\", \"test_add_non_blank_textfield_and_charfield (migrations.test_autodetector.AutodetectorTests)\", \"Tests detection for adding db_table in model's options.\", \"Tests detection for changing db_table in model's options'.\", \"test_alter_db_table_no_changes (migrations.test_autodetector.AutodetectorTests)\", \"Tests detection for removing db_table in model's options.\", \"test_alter_db_table_with_model_change (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_field_to_fk_dependency_other_app (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_field_to_not_null_oneoff_default (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_field_to_not_null_with_default (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_field_to_not_null_without_default (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_fk_before_model_deletion (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_many_to_many (migrations.test_autodetector.AutodetectorTests)\", \"test_alter_model_managers (migrations.test_autodetector.AutodetectorTests)\", \"Changing a model's options should make a change.\", \"Changing a proxy model's options should also make a change.\", \"Tests auto-naming of migrations for graph matching.\", \"test_arrange_for_graph_with_multiple_initial (migrations.test_autodetector.AutodetectorTests)\", \"Bases of other models come first.\", \"test_circular_dependency_mixed_addcreate (migrations.test_autodetector.AutodetectorTests)\", \"test_circular_dependency_swappable (migrations.test_autodetector.AutodetectorTests)\", \"test_circular_dependency_swappable2 (migrations.test_autodetector.AutodetectorTests)\", \"test_circular_dependency_swappable_self (migrations.test_autodetector.AutodetectorTests)\", \"test_circular_fk_dependency (migrations.test_autodetector.AutodetectorTests)\", \"test_concrete_field_changed_to_many_to_many (migrations.test_autodetector.AutodetectorTests)\", \"test_create_model_and_unique_together (migrations.test_autodetector.AutodetectorTests)\", \"Test creation of new model with constraints already defined.\", \"Test creation of new model with indexes already defined.\", \"test_create_with_through_model (migrations.test_autodetector.AutodetectorTests)\", \"test_custom_deconstructible (migrations.test_autodetector.AutodetectorTests)\", \"Tests custom naming of migrations for graph matching.\", \"Field instances are handled correctly by nested deconstruction.\", \"test_deconstruct_type (migrations.test_autodetector.AutodetectorTests)\", \"Nested deconstruction descends into dict values.\", \"Nested deconstruction descends into lists.\", \"Nested deconstruction descends into tuples.\", \"test_default_related_name_option (migrations.test_autodetector.AutodetectorTests)\", \"test_different_regex_does_alter (migrations.test_autodetector.AutodetectorTests)\", \"test_empty_foo_together (migrations.test_autodetector.AutodetectorTests)\", 
\"test_first_dependency (migrations.test_autodetector.AutodetectorTests)\", \"Having a ForeignKey automatically adds a dependency.\", \"test_fk_dependency_other_app (migrations.test_autodetector.AutodetectorTests)\", \"test_foo_together_no_changes (migrations.test_autodetector.AutodetectorTests)\", \"test_foo_together_ordering (migrations.test_autodetector.AutodetectorTests)\", \"Tests unique_together and field removal detection & ordering\", \"test_foreign_key_removed_before_target_model (migrations.test_autodetector.AutodetectorTests)\", \"test_identical_regex_doesnt_alter (migrations.test_autodetector.AutodetectorTests)\", \"test_keep_db_table_with_model_change (migrations.test_autodetector.AutodetectorTests)\", \"test_last_dependency (migrations.test_autodetector.AutodetectorTests)\", \"test_m2m_w_through_multistep_remove (migrations.test_autodetector.AutodetectorTests)\", \"test_managed_to_unmanaged (migrations.test_autodetector.AutodetectorTests)\", \"test_many_to_many_changed_to_concrete_field (migrations.test_autodetector.AutodetectorTests)\", \"test_many_to_many_removed_before_through_model (migrations.test_autodetector.AutodetectorTests)\", \"test_many_to_many_removed_before_through_model_2 (migrations.test_autodetector.AutodetectorTests)\", \"test_mti_inheritance_model_removal (migrations.test_autodetector.AutodetectorTests)\", \"#23956 - Inheriting models doesn't move *_ptr fields into AddField operations.\", \"test_nested_deconstructible_objects (migrations.test_autodetector.AutodetectorTests)\", \"Tests autodetection of new models.\", \"test_non_circular_foreignkey_dependency_removal (migrations.test_autodetector.AutodetectorTests)\", \"Tests deletion of old models.\", \"Test change detection of reordering of fields in indexes.\", \"test_pk_fk_included (migrations.test_autodetector.AutodetectorTests)\", \"The autodetector correctly deals with proxy models.\", \"Bases of proxies come first.\", \"test_proxy_custom_pk (migrations.test_autodetector.AutodetectorTests)\", \"FK dependencies still work on proxy models.\", \"test_proxy_to_mti_with_fk_to_proxy (migrations.test_autodetector.AutodetectorTests)\", \"test_proxy_to_mti_with_fk_to_proxy_proxy (migrations.test_autodetector.AutodetectorTests)\", \"test_remove_alter_order_with_respect_to (migrations.test_autodetector.AutodetectorTests)\", \"Test change detection of removed constraints.\", \"Tests autodetection of removed fields.\", \"test_remove_field_and_foo_together (migrations.test_autodetector.AutodetectorTests)\", \"Test change detection of removed indexes.\", \"Tests autodetection of renamed fields.\", \"test_rename_field_and_foo_together (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_field_foreign_key_to_field (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_field_preserved_db_column (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_foreign_object_fields (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_m2m_through_model (migrations.test_autodetector.AutodetectorTests)\", \"Tests autodetection of renamed models.\", \"test_rename_model_case (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_model_reverse_relation_dependencies (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_model_with_fks_in_different_position (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_model_with_renamed_rel_field (migrations.test_autodetector.AutodetectorTests)\", \"test_rename_referenced_primary_key 
(migrations.test_autodetector.AutodetectorTests)\", \"test_rename_related_field_preserved_db_column (migrations.test_autodetector.AutodetectorTests)\", \"test_replace_string_with_foreignkey (migrations.test_autodetector.AutodetectorTests)\", \"test_same_app_circular_fk_dependency (migrations.test_autodetector.AutodetectorTests)\", \"test_same_app_circular_fk_dependency_with_unique_together_and_indexes (migrations.test_autodetector.AutodetectorTests)\", \"test_same_app_no_fk_dependency (migrations.test_autodetector.AutodetectorTests)\", \"Setting order_with_respect_to adds a field.\", \"test_swappable (migrations.test_autodetector.AutodetectorTests)\", \"test_swappable_changed (migrations.test_autodetector.AutodetectorTests)\", \"test_swappable_circular_multi_mti (migrations.test_autodetector.AutodetectorTests)\", \"Swappable models get their CreateModel first.\", \"test_trim_apps (migrations.test_autodetector.AutodetectorTests)\", \"The autodetector correctly deals with managed models.\", \"test_unmanaged_custom_pk (migrations.test_autodetector.AutodetectorTests)\", \"test_unmanaged_delete (migrations.test_autodetector.AutodetectorTests)\", \"test_unmanaged_to_managed (migrations.test_autodetector.AutodetectorTests)\"]", + "expected_spans": { + "django/db/migrations/autodetector.py": [ + "MigrationAutodetector._detect_changes", + "MigrationAutodetector.generate_created_models" + ] + }, + "test_file_spans": { + "tests/migrations/test_autodetector.py": [ + "AutodetectorTests.test_alter_model_managers" + ] + }, + "resolved_by": [ + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/backends/base/schema.py": [ + "BaseDatabaseSchemaEditor.__exit__", + "BaseDatabaseSchemaEditor.add_index", + "BaseDatabaseSchemaEditor.add_field" + ], + "django/db/migrations/autodetector.py": [ + "MigrationAutodetector._sort_migrations", + "MigrationAutodetector.add_operation" + ], + "django/db/migrations/operations/models.py": [ + "AlterOrderWithRespectTo.database_forwards", + "AlterOrderWithRespectTo.references_field", + "AddIndex" + ], + "django/db/models/indexes.py": [ + "Index.create_sql" + ] + }, + "alternative_spans": { + "django/db/migrations/autodetector.py": [ + "MigrationAutodetector._sort_migrations", + "MigrationAutodetector.add_operation" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "django/db/migrations/autodetector.py": [ + "MigrationAutodetector._sort_migrations", + "MigrationAutodetector.add_operation" + ] + } + } + ] + }, + { + "instance_id": "django__django-13315", + "repo": "django/django", + "base_commit": "36bc47069ce071e80c8129500de3b8664d2058a7", + "problem_statement": "limit_choices_to on a ForeignKey can render duplicate options in formfield\nDescription\n\t\nIf you pass a Q object as limit_choices_to on a ForeignKey field involving a join, you may end up with duplicate options in your form.\nSee regressiontest in patch for a clear view on the problem.\n", + "golden_patch": "diff --git a/django/forms/models.py b/django/forms/models.py\n--- a/django/forms/models.py\n+++ b/django/forms/models.py\n@@ -97,10 +97,18 @@ def model_to_dict(instance, fields=None, exclude=None):\n \n def apply_limit_choices_to_to_formfield(formfield):\n \"\"\"Apply limit_choices_to to the formfield's queryset if needed.\"\"\"\n+ from django.db.models import Exists, OuterRef, Q\n if hasattr(formfield, 'queryset') and hasattr(formfield, 'get_limit_choices_to'):\n limit_choices_to = 
formfield.get_limit_choices_to()\n- if limit_choices_to is not None:\n- formfield.queryset = formfield.queryset.complex_filter(limit_choices_to)\n+ if limit_choices_to:\n+ complex_filter = limit_choices_to\n+ if not isinstance(complex_filter, Q):\n+ complex_filter = Q(**limit_choices_to)\n+ complex_filter &= Q(pk=OuterRef('pk'))\n+ # Use Exists() to avoid potential duplicates.\n+ formfield.queryset = formfield.queryset.filter(\n+ Exists(formfield.queryset.model._base_manager.filter(complex_filter)),\n+ )\n \n \n def fields_for_model(model, fields=None, exclude=None, widgets=None,\n", + "test_patch": "diff --git a/tests/model_forms/models.py b/tests/model_forms/models.py\n--- a/tests/model_forms/models.py\n+++ b/tests/model_forms/models.py\n@@ -411,9 +411,14 @@ class StumpJoke(models.Model):\n Character,\n models.CASCADE,\n limit_choices_to=today_callable_dict,\n- related_name=\"+\",\n+ related_name='jokes',\n )\n- has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name=\"+\")\n+ has_fooled_today = models.ManyToManyField(\n+ Character,\n+ limit_choices_to=today_callable_q,\n+ related_name='jokes_today',\n+ )\n+ funny = models.BooleanField(default=False)\n \n \n # Model for #13776\ndiff --git a/tests/model_forms/tests.py b/tests/model_forms/tests.py\n--- a/tests/model_forms/tests.py\n+++ b/tests/model_forms/tests.py\n@@ -16,6 +16,7 @@\n )\n from django.template import Context, Template\n from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature\n+from django.test.utils import isolate_apps\n \n from .models import (\n Article, ArticleStatus, Author, Author1, Award, BetterWriter, BigInt, Book,\n@@ -2829,6 +2830,72 @@ def test_callable_called_each_time_form_is_instantiated(self):\n StumpJokeForm()\n self.assertEqual(today_callable_dict.call_count, 3)\n \n+ @isolate_apps('model_forms')\n+ def test_limit_choices_to_no_duplicates(self):\n+ joke1 = StumpJoke.objects.create(\n+ funny=True,\n+ most_recently_fooled=self.threepwood,\n+ )\n+ joke2 = StumpJoke.objects.create(\n+ funny=True,\n+ most_recently_fooled=self.threepwood,\n+ )\n+ joke3 = StumpJoke.objects.create(\n+ funny=True,\n+ most_recently_fooled=self.marley,\n+ )\n+ StumpJoke.objects.create(funny=False, most_recently_fooled=self.marley)\n+ joke1.has_fooled_today.add(self.marley, self.threepwood)\n+ joke2.has_fooled_today.add(self.marley)\n+ joke3.has_fooled_today.add(self.marley, self.threepwood)\n+\n+ class CharacterDetails(models.Model):\n+ character1 = models.ForeignKey(\n+ Character,\n+ models.CASCADE,\n+ limit_choices_to=models.Q(\n+ jokes__funny=True,\n+ jokes_today__funny=True,\n+ ),\n+ related_name='details_fk_1',\n+ )\n+ character2 = models.ForeignKey(\n+ Character,\n+ models.CASCADE,\n+ limit_choices_to={\n+ 'jokes__funny': True,\n+ 'jokes_today__funny': True,\n+ },\n+ related_name='details_fk_2',\n+ )\n+ character3 = models.ManyToManyField(\n+ Character,\n+ limit_choices_to=models.Q(\n+ jokes__funny=True,\n+ jokes_today__funny=True,\n+ ),\n+ related_name='details_m2m_1',\n+ )\n+\n+ class CharacterDetailsForm(forms.ModelForm):\n+ class Meta:\n+ model = CharacterDetails\n+ fields = '__all__'\n+\n+ form = CharacterDetailsForm()\n+ self.assertCountEqual(\n+ form.fields['character1'].queryset,\n+ [self.marley, self.threepwood],\n+ )\n+ self.assertCountEqual(\n+ form.fields['character2'].queryset,\n+ [self.marley, self.threepwood],\n+ )\n+ self.assertCountEqual(\n+ form.fields['character3'].queryset,\n+ [self.marley, self.threepwood],\n+ )\n+\n \n class 
FormFieldCallbackTests(SimpleTestCase):\n \n", + "fail_to_pass": "[\"test_limit_choices_to_no_duplicates (model_forms.tests.LimitChoicesToTests)\"]", + "pass_to_pass": "[\"test_modelform_factory_metaclass (model_forms.tests.CustomMetaclassTestCase)\", \"test_bad_callback (model_forms.tests.FormFieldCallbackTests)\", \"Regression for #13095: Using base forms with widgets defined in Meta should not raise errors.\", \"A custom formfield_callback is used if provided\", \"Regression for #15315: modelform_factory should accept widgets\", \"test_inherit_after_custom_callback (model_forms.tests.FormFieldCallbackTests)\", \"Regression for #19733\", \"test_custom_error_messages (model_forms.tests.ModelFormCustomErrorTests)\", \"test_model_clean_error_messages (model_forms.tests.ModelFormCustomErrorTests)\", \"test_setattr_raises_validation_error_field_specific (model_forms.tests.StrictAssignmentTests)\", \"test_setattr_raises_validation_error_non_field (model_forms.tests.StrictAssignmentTests)\", \"test_notrequired_overrides_notblank (model_forms.tests.ValidationTest)\", \"test_validates_with_replaced_field_excluded (model_forms.tests.ValidationTest)\", \"test_validates_with_replaced_field_not_specified (model_forms.tests.ValidationTest)\", \"test_override_clean (model_forms.tests.CustomCleanTests)\", \"test_field_removal (model_forms.tests.ModelFormInheritanceTests)\", \"test_field_removal_name_clashes (model_forms.tests.ModelFormInheritanceTests)\", \"test_form_subclass_inheritance (model_forms.tests.ModelFormInheritanceTests)\", \"test_model_form_applies_localize_to_all_fields (model_forms.tests.LocalizedModelFormTest)\", \"test_model_form_applies_localize_to_some_fields (model_forms.tests.LocalizedModelFormTest)\", \"test_model_form_refuses_arbitrary_string (model_forms.tests.LocalizedModelFormTest)\", \"Data for a ManyToManyField is a list rather than a lazy QuerySet.\", \"test_callable_called_each_time_form_is_instantiated (model_forms.tests.LimitChoicesToTests)\", \"test_custom_field_with_queryset_but_no_limit_choices_to (model_forms.tests.LimitChoicesToTests)\", \"test_fields_for_model_applies_limit_choices_to (model_forms.tests.LimitChoicesToTests)\", \"test_limit_choices_to_callable_for_fk_rel (model_forms.tests.LimitChoicesToTests)\", \"test_limit_choices_to_callable_for_m2m_rel (model_forms.tests.LimitChoicesToTests)\", \"test_assignment_of_none (model_forms.tests.ModelOneToOneFieldTests)\", \"test_assignment_of_none_null_false (model_forms.tests.ModelOneToOneFieldTests)\", \"test_modelform_onetoonefield (model_forms.tests.ModelOneToOneFieldTests)\", \"test_modelform_subclassed_model (model_forms.tests.ModelOneToOneFieldTests)\", \"test_onetoonefield (model_forms.tests.ModelOneToOneFieldTests)\", \"test_article_form (model_forms.tests.ModelFormBaseTest)\", \"test_bad_form (model_forms.tests.ModelFormBaseTest)\", \"test_base_form (model_forms.tests.ModelFormBaseTest)\", \"test_blank_false_with_null_true_foreign_key_field (model_forms.tests.ModelFormBaseTest)\", \"test_blank_foreign_key_with_radio (model_forms.tests.ModelFormBaseTest)\", \"test_blank_with_null_foreign_key_field (model_forms.tests.ModelFormBaseTest)\", \"test_confused_form (model_forms.tests.ModelFormBaseTest)\", \"test_default_filefield (model_forms.tests.ModelFormBaseTest)\", \"test_default_not_populated_on_checkboxselectmultiple (model_forms.tests.ModelFormBaseTest)\", \"test_default_not_populated_on_non_empty_value_in_cleaned_data (model_forms.tests.ModelFormBaseTest)\", 
\"test_default_not_populated_on_optional_checkbox_input (model_forms.tests.ModelFormBaseTest)\", \"test_default_not_populated_on_selectmultiple (model_forms.tests.ModelFormBaseTest)\", \"test_default_populated_on_optional_field (model_forms.tests.ModelFormBaseTest)\", \"test_default_selectdatewidget (model_forms.tests.ModelFormBaseTest)\", \"test_default_splitdatetime_field (model_forms.tests.ModelFormBaseTest)\", \"test_empty_fields_on_modelform (model_forms.tests.ModelFormBaseTest)\", \"test_empty_fields_to_construct_instance (model_forms.tests.ModelFormBaseTest)\", \"test_empty_fields_to_fields_for_model (model_forms.tests.ModelFormBaseTest)\", \"test_exclude_and_validation (model_forms.tests.ModelFormBaseTest)\", \"test_exclude_fields (model_forms.tests.ModelFormBaseTest)\", \"test_exclude_fields_with_string (model_forms.tests.ModelFormBaseTest)\", \"test_exclude_nonexistent_field (model_forms.tests.ModelFormBaseTest)\", \"test_extra_declared_field_model_form (model_forms.tests.ModelFormBaseTest)\", \"test_extra_field_model_form (model_forms.tests.ModelFormBaseTest)\", \"test_extra_field_modelform_factory (model_forms.tests.ModelFormBaseTest)\", \"test_extra_fields (model_forms.tests.ModelFormBaseTest)\", \"test_invalid_meta_model (model_forms.tests.ModelFormBaseTest)\", \"test_limit_fields_with_string (model_forms.tests.ModelFormBaseTest)\", \"test_limit_nonexistent_field (model_forms.tests.ModelFormBaseTest)\", \"test_missing_fields_attribute (model_forms.tests.ModelFormBaseTest)\", \"test_mixmodel_form (model_forms.tests.ModelFormBaseTest)\", \"test_no_model_class (model_forms.tests.ModelFormBaseTest)\", \"test_non_blank_foreign_key_with_radio (model_forms.tests.ModelFormBaseTest)\", \"test_orderfields2_form (model_forms.tests.ModelFormBaseTest)\", \"test_orderfields_form (model_forms.tests.ModelFormBaseTest)\", \"test_override_field (model_forms.tests.ModelFormBaseTest)\", \"test_prefixed_form_with_default_field (model_forms.tests.ModelFormBaseTest)\", \"test_renderer_kwarg (model_forms.tests.ModelFormBaseTest)\", \"test_replace_field (model_forms.tests.ModelFormBaseTest)\", \"test_replace_field_variant_2 (model_forms.tests.ModelFormBaseTest)\", \"test_replace_field_variant_3 (model_forms.tests.ModelFormBaseTest)\", \"test_save_blank_false_with_required_false (model_forms.tests.ModelFormBaseTest)\", \"test_save_blank_null_unique_charfield_saves_null (model_forms.tests.ModelFormBaseTest)\", \"test_subcategory_form (model_forms.tests.ModelFormBaseTest)\", \"test_subclassmeta_form (model_forms.tests.ModelFormBaseTest)\", \"test_callable_field_default (model_forms.tests.OtherModelFormTests)\", \"test_choices_type (model_forms.tests.OtherModelFormTests)\", \"test_foreignkeys_which_use_to_field (model_forms.tests.OtherModelFormTests)\", \"test_iterable_model_m2m (model_forms.tests.OtherModelFormTests)\", \"test_media_on_modelform (model_forms.tests.OtherModelFormTests)\", \"test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields (model_forms.tests.OtherModelFormTests)\", \"test_prefetch_related_queryset (model_forms.tests.OtherModelFormTests)\", \"test_clean_does_deduplicate_values (model_forms.tests.ModelMultipleChoiceFieldTests)\", \"test_model_multiple_choice_field (model_forms.tests.ModelMultipleChoiceFieldTests)\", \"test_model_multiple_choice_field_22745 (model_forms.tests.ModelMultipleChoiceFieldTests)\", \"test_model_multiple_choice_number_of_queries (model_forms.tests.ModelMultipleChoiceFieldTests)\", \"test_model_multiple_choice_required_false 
(model_forms.tests.ModelMultipleChoiceFieldTests)\", \"test_model_multiple_choice_run_validators (model_forms.tests.ModelMultipleChoiceFieldTests)\", \"test_model_multiple_choice_show_hidden_initial (model_forms.tests.ModelMultipleChoiceFieldTests)\", \"test_show_hidden_initial_changed_queries_efficiently (model_forms.tests.ModelMultipleChoiceFieldTests)\", \"test_to_field_name_with_initial_data (model_forms.tests.ModelMultipleChoiceFieldTests)\", \"test_big_integer_field (model_forms.tests.ModelOtherFieldTests)\", \"test_http_prefixing (model_forms.tests.ModelOtherFieldTests)\", \"test_modelform_non_editable_field (model_forms.tests.ModelOtherFieldTests)\", \"Check basic URL field validation on model forms\", \"test_error_messages_overrides (model_forms.tests.TestFieldOverridesByFormMeta)\", \"test_field_type_overrides (model_forms.tests.TestFieldOverridesByFormMeta)\", \"test_help_text_overrides (model_forms.tests.TestFieldOverridesByFormMeta)\", \"test_label_overrides (model_forms.tests.TestFieldOverridesByFormMeta)\", \"test_widget_overrides (model_forms.tests.TestFieldOverridesByFormMeta)\", \"test_abstract_inherited_unique (model_forms.tests.UniqueTest)\", \"test_abstract_inherited_unique_together (model_forms.tests.UniqueTest)\", \"Ensure keys and blank character strings are tested for uniqueness.\", \"Test for primary_key being in the form and failing validation.\", \"test_inherited_unique (model_forms.tests.UniqueTest)\", \"test_inherited_unique_for_date (model_forms.tests.UniqueTest)\", \"test_inherited_unique_together (model_forms.tests.UniqueTest)\", \"test_multiple_field_unique_together (model_forms.tests.UniqueTest)\", \"test_override_unique_for_date_message (model_forms.tests.UniqueTest)\", \"test_override_unique_message (model_forms.tests.UniqueTest)\", \"test_override_unique_together_message (model_forms.tests.UniqueTest)\", \"test_simple_unique (model_forms.tests.UniqueTest)\", \"test_unique_for_date (model_forms.tests.UniqueTest)\", \"test_unique_for_date_in_exclude (model_forms.tests.UniqueTest)\", \"test_unique_for_date_with_nullable_date (model_forms.tests.UniqueTest)\", \"test_unique_null (model_forms.tests.UniqueTest)\", \"ModelForm test of unique_together constraint\", \"test_unique_together_exclusion (model_forms.tests.UniqueTest)\", \"test_auto_id (model_forms.tests.ModelFormBasicTests)\", \"test_base_form (model_forms.tests.ModelFormBasicTests)\", \"test_basic_creation (model_forms.tests.ModelFormBasicTests)\", \"test_custom_form_fields (model_forms.tests.ModelFormBasicTests)\", \"test_initial_values (model_forms.tests.ModelFormBasicTests)\", \"test_m2m_editing (model_forms.tests.ModelFormBasicTests)\", \"test_m2m_initial_callable (model_forms.tests.ModelFormBasicTests)\", \"test_multi_fields (model_forms.tests.ModelFormBasicTests)\", \"test_recleaning_model_form_instance (model_forms.tests.ModelFormBasicTests)\", \"test_runtime_choicefield_populated (model_forms.tests.ModelFormBasicTests)\", \"test_save_commit_false (model_forms.tests.ModelFormBasicTests)\", \"test_save_with_data_errors (model_forms.tests.ModelFormBasicTests)\", \"test_subset_fields (model_forms.tests.ModelFormBasicTests)\", \"test_validate_foreign_key_to_model_with_overridden_manager (model_forms.tests.ModelFormBasicTests)\", \"test_validate_foreign_key_uses_default_manager (model_forms.tests.ModelFormBasicTests)\", \"test_clean_false (model_forms.tests.FileAndImageFieldTests)\", \"test_clean_false_required (model_forms.tests.FileAndImageFieldTests)\", \"test_clear_and_file_contradiction 
(model_forms.tests.FileAndImageFieldTests)\", \"test_custom_file_field_save (model_forms.tests.FileAndImageFieldTests)\", \"test_file_field_data (model_forms.tests.FileAndImageFieldTests)\", \"test_file_field_multiple_save (model_forms.tests.FileAndImageFieldTests)\", \"FilePathField(blank=True) includes the empty option.\", \"test_filefield_required_false (model_forms.tests.FileAndImageFieldTests)\", \"test_full_clear (model_forms.tests.FileAndImageFieldTests)\", \"test_image_field (model_forms.tests.FileAndImageFieldTests)\", \"test_render_empty_file_field (model_forms.tests.FileAndImageFieldTests)\"]", + "expected_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + }, + "test_file_spans": { + "tests/model_forms/models.py": [ + "StumpJoke" + ], + "tests/model_forms/tests.py": [ + "imports", + "LimitChoicesToTests" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/models/fields/related.py": [ + "RelatedField.formfield" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField._set_queryset" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField._set_queryset" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.get_choices" + ], + "django/db/models/fields/reverse_related.py": [ + "ForeignObjectRel.get_choices" + ], + "django/forms/models.py": [ + "ModelChoiceField.__init__" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.__init__" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/db/models/fields/related.py": [ + "ForeignKey.formfield" + ], + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/forms/models.py": [ + "imports", + "construct_instance", + "model_to_dict", + "apply_limit_choices_to_to_formfield", + "fields_for_model", + "ModelFormOptions", + "ModelFormMetaclass.__new__", + "BaseModelForm.__init__", + "BaseModelForm._get_validation_exclusions", + "BaseModelForm._update_errors", + "BaseModelForm._post_clean", + "BaseModelForm._save_m2m", + "BaseModelForm.save", + "modelform_factory", + "BaseModelFormSet", + "BaseModelFormSet.__init__", + "BaseModelFormSet._construct_form", + "BaseModelFormSet.get_queryset", + "BaseModelFormSet.save", + "BaseModelFormSet:7", + "BaseModelFormSet.clean", + "BaseModelFormSet.validate_unique", + "BaseModelFormSet.get_unique_error_message", + "BaseModelFormSet.get_date_error_message", + "BaseModelFormSet.get_form_error", + "BaseModelFormSet.save_existing_objects", + "BaseModelFormSet.save_new_objects", + "BaseModelFormSet.add_fields", + "modelformset_factory", + "BaseInlineFormSet", + "BaseInlineFormSet.__init__", + "BaseInlineFormSet._construct_form", + "BaseInlineFormSet.get_default_prefix", + "BaseInlineFormSet.save_new", + "BaseInlineFormSet.add_fields", + "BaseInlineFormSet.get_unique_error_message", + "_get_foreign_key", + 
"inlineformset_factory", + "InlineForeignKeyField", + "InlineForeignKeyField.__init__", + "InlineForeignKeyField.clean", + "ModelChoiceIteratorValue.__init__", + "ModelChoiceIterator.__init__", + "ModelChoiceIterator.__iter__", + "ModelChoiceIterator.__len__", + "ModelChoiceIterator.__bool__", + "ModelChoiceIterator.choice", + "ModelChoiceField", + "ModelChoiceField.__init__", + "ModelChoiceField.__deepcopy__", + "ModelChoiceField:7", + "ModelChoiceField._get_choices", + "ModelChoiceField.to_python", + "ModelMultipleChoiceField", + "ModelMultipleChoiceField.__init__", + "ModelMultipleChoiceField.clean", + "ModelMultipleChoiceField._check_values", + "ModelMultipleChoiceField.prepare_value", + "modelform_defines_fields" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "imports", + "construct_instance", + "model_to_dict", + "apply_limit_choices_to_to_formfield", + "fields_for_model", + "ModelFormOptions", + "ModelFormMetaclass.__new__", + "BaseModelForm.__init__", + "BaseModelForm._get_validation_exclusions", + "BaseModelForm._update_errors", + "BaseModelForm._post_clean", + "BaseModelForm._save_m2m", + "BaseModelForm.save", + "modelform_factory", + "BaseModelFormSet", + "BaseModelFormSet.__init__", + "BaseModelFormSet._construct_form", + "BaseModelFormSet.get_queryset", + "BaseModelFormSet.save", + "BaseModelFormSet:7", + "BaseModelFormSet.clean", + "BaseModelFormSet.validate_unique", + "BaseModelFormSet.get_unique_error_message", + "BaseModelFormSet.get_date_error_message", + "BaseModelFormSet.get_form_error", + "BaseModelFormSet.save_existing_objects", + "BaseModelFormSet.save_new_objects", + "BaseModelFormSet.add_fields", + "modelformset_factory", + "BaseInlineFormSet", + "BaseInlineFormSet.__init__", + "BaseInlineFormSet._construct_form", + "BaseInlineFormSet.get_default_prefix", + "BaseInlineFormSet.save_new", + "BaseInlineFormSet.add_fields", + "BaseInlineFormSet.get_unique_error_message", + "_get_foreign_key", + "inlineformset_factory", + "InlineForeignKeyField", + "InlineForeignKeyField.__init__", + "InlineForeignKeyField.clean", + "ModelChoiceIteratorValue.__init__", + "ModelChoiceIterator.__init__", + "ModelChoiceIterator.__iter__", + "ModelChoiceIterator.__len__", + "ModelChoiceIterator.__bool__", + "ModelChoiceIterator.choice", + "ModelChoiceField", + "ModelChoiceField.__init__", + "ModelChoiceField.__deepcopy__", + "ModelChoiceField:7", + "ModelChoiceField._get_choices", + "ModelChoiceField.to_python", + "ModelMultipleChoiceField", + "ModelMultipleChoiceField.__init__", + "ModelMultipleChoiceField.clean", + "ModelMultipleChoiceField._check_values", + "ModelMultipleChoiceField.prepare_value", + "modelform_defines_fields" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/models/fields/related.py": [ + "RelatedField.get_limit_choices_to", + "RelatedField.formfield" + ], + "tests/model_fields/test_foreignkey.py": [] + }, + "alternative_spans": {} + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + 
"apply_limit_choices_to_to_formfield" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.get_choices" + ], + "django/db/models/fields/reverse_related.py": [ + "ForeignObjectRel.get_choices" + ], + "django/forms/models.py": [ + "ModelChoiceField.__init__" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.__init__" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "apply_limit_choices_to_to_formfield" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240524_opencsg_starship_gpt4", + "spans": { + "django/forms/models.py": [ + "ModelChoiceField._set_queryset" + ] + } + }, + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + "django/forms/models.py": [ + "ModelChoiceField.__init__" + ] + } + }, + { + "run_name": "20240617_moatless_gpt4o", + "spans": { + "django/forms/models.py": [ + "ModelChoiceField.__init__" + ] + } + } + ] + }, + { + "instance_id": "django__django-13321", + "repo": "django/django", + "base_commit": "35b03788b0607c1f8d2b64e4fa9e1669b0907ea4", + "problem_statement": "Decoding an invalid session data crashes.\nDescription\n\t \n\t\t(last modified by Matt Hegarty)\n\t \nHi\nI recently upgraded my staging server to 3.1. I think that there was an old session which was still active.\nOn browsing to any URL, I get the crash below. It looks similar to \u200bthis issue.\nI cannot login at all with Chrome - each attempt to access the site results in a crash. 
Login with Firefox works fine.\nThis is only happening on my Staging site, which is running Gunicorn behind nginx proxy.\nInternal Server Error: /overview/\nTraceback (most recent call last):\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/sessions/backends/base.py\", line 215, in _get_session\nreturn self._session_cache\nAttributeError: 'SessionStore' object has no attribute '_session_cache'\nDuring handling of the above exception, another exception occurred:\nTraceback (most recent call last):\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/sessions/backends/base.py\", line 118, in decode\nreturn signing.loads(session_data, salt=self.key_salt, serializer=self.serializer)\nFile \"/usr/local/lib/python3.8/site-packages/django/core/signing.py\", line 135, in loads\nbase64d = TimestampSigner(key, salt=salt).unsign(s, max_age=max_age).encode()\nFile \"/usr/local/lib/python3.8/site-packages/django/core/signing.py\", line 201, in unsign\nresult = super().unsign(value)\nFile \"/usr/local/lib/python3.8/site-packages/django/core/signing.py\", line 184, in unsign\nraise BadSignature('Signature \"%s\" does not match' % sig)\ndjango.core.signing.BadSignature: Signature \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\" does not match\nDuring handling of the above exception, another exception occurred:\nTraceback (most recent call last):\nFile \"/usr/local/lib/python3.8/site-packages/django/core/handlers/exception.py\", line 47, in inner\nresponse = get_response(request)\nFile \"/usr/local/lib/python3.8/site-packages/django/core/handlers/base.py\", line 179, in _get_response\nresponse = wrapped_callback(request, *callback_args, **callback_kwargs)\nFile \"/usr/local/lib/python3.8/site-packages/django/views/generic/base.py\", line 73, in view\nreturn self.dispatch(request, *args, **kwargs)\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/auth/mixins.py\", line 50, in dispatch\nif not request.user.is_authenticated:\nFile \"/usr/local/lib/python3.8/site-packages/django/utils/functional.py\", line 240, in inner\nself._setup()\nFile \"/usr/local/lib/python3.8/site-packages/django/utils/functional.py\", line 376, in _setup\nself._wrapped = self._setupfunc()\nFile \"/usr/local/lib/python3.8/site-packages/django_otp/middleware.py\", line 38, in _verify_user\nuser.otp_device = None\nFile \"/usr/local/lib/python3.8/site-packages/django/utils/functional.py\", line 270, in __setattr__\nself._setup()\nFile \"/usr/local/lib/python3.8/site-packages/django/utils/functional.py\", line 376, in _setup\nself._wrapped = self._setupfunc()\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/auth/middleware.py\", line 23, in \nrequest.user = SimpleLazyObject(lambda: get_user(request))\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/auth/middleware.py\", line 11, in get_user\nrequest._cached_user = auth.get_user(request)\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/auth/__init__.py\", line 174, in get_user\nuser_id = _get_user_session_key(request)\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/auth/__init__.py\", line 58, in _get_user_session_key\nreturn get_user_model()._meta.pk.to_python(request.session[SESSION_KEY])\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/sessions/backends/base.py\", line 65, in __getitem__\nreturn self._session[key]\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/sessions/backends/base.py\", line 220, in _get_session\nself._session_cache = self.load()\nFile 
\"/usr/local/lib/python3.8/site-packages/django/contrib/sessions/backends/db.py\", line 44, in load\nreturn self.decode(s.session_data) if s else {}\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/sessions/backends/base.py\", line 122, in decode\nreturn self._legacy_decode(session_data)\nFile \"/usr/local/lib/python3.8/site-packages/django/contrib/sessions/backends/base.py\", line 126, in _legacy_decode\nencoded_data = base64.b64decode(session_data.encode('ascii'))\nFile \"/usr/local/lib/python3.8/base64.py\", line 87, in b64decode\nreturn binascii.a2b_base64(s)\nbinascii.Error: Incorrect padding\n", + "golden_patch": "diff --git a/django/contrib/sessions/backends/base.py b/django/contrib/sessions/backends/base.py\n--- a/django/contrib/sessions/backends/base.py\n+++ b/django/contrib/sessions/backends/base.py\n@@ -121,6 +121,15 @@ def decode(self, session_data):\n return signing.loads(session_data, salt=self.key_salt, serializer=self.serializer)\n # RemovedInDjango40Warning: when the deprecation ends, handle here\n # exceptions similar to what _legacy_decode() does now.\n+ except signing.BadSignature:\n+ try:\n+ # Return an empty session if data is not in the pre-Django 3.1\n+ # format.\n+ return self._legacy_decode(session_data)\n+ except Exception:\n+ logger = logging.getLogger('django.security.SuspiciousSession')\n+ logger.warning('Session data corrupted')\n+ return {}\n except Exception:\n return self._legacy_decode(session_data)\n \n", + "test_patch": "diff --git a/tests/sessions_tests/tests.py b/tests/sessions_tests/tests.py\n--- a/tests/sessions_tests/tests.py\n+++ b/tests/sessions_tests/tests.py\n@@ -333,11 +333,16 @@ def test_default_hashing_algorith_legacy_decode(self):\n self.assertEqual(self.session._legacy_decode(encoded), data)\n \n def test_decode_failure_logged_to_security(self):\n- bad_encode = base64.b64encode(b'flaskdj:alkdjf').decode('ascii')\n- with self.assertLogs('django.security.SuspiciousSession', 'WARNING') as cm:\n- self.assertEqual({}, self.session.decode(bad_encode))\n- # The failed decode is logged.\n- self.assertIn('corrupted', cm.output[0])\n+ tests = [\n+ base64.b64encode(b'flaskdj:alkdjf').decode('ascii'),\n+ 'bad:encoded:value',\n+ ]\n+ for encoded in tests:\n+ with self.subTest(encoded=encoded):\n+ with self.assertLogs('django.security.SuspiciousSession', 'WARNING') as cm:\n+ self.assertEqual(self.session.decode(encoded), {})\n+ # The failed decode is logged.\n+ self.assertIn('Session data corrupted', cm.output[0])\n \n def test_actual_expiry(self):\n # this doesn't work with JSONSerializer (serializing timedelta)\n", + "fail_to_pass": "[\"test_clear (sessions_tests.tests.CookieSessionTests)\", \"test_custom_expiry_datetime (sessions_tests.tests.CookieSessionTests)\", \"test_custom_expiry_reset (sessions_tests.tests.CookieSessionTests)\", \"test_custom_expiry_seconds (sessions_tests.tests.CookieSessionTests)\", \"test_custom_expiry_timedelta (sessions_tests.tests.CookieSessionTests)\", \"test_cycle (sessions_tests.tests.CookieSessionTests)\", \"test_cycle_with_no_session_cache (sessions_tests.tests.CookieSessionTests)\", \"test_decode (sessions_tests.tests.CookieSessionTests)\", \"test_decode_failure_logged_to_security (sessions_tests.tests.CookieSessionTests)\", \"test_decode_legacy (sessions_tests.tests.CookieSessionTests)\", \"test_default_expiry (sessions_tests.tests.CookieSessionTests)\", \"test_default_hashing_algorith_legacy_decode (sessions_tests.tests.CookieSessionTests)\", \"test_delete (sessions_tests.tests.CookieSessionTests)\", 
\"test_flush (sessions_tests.tests.CookieSessionTests)\", \"test_get_empty (sessions_tests.tests.CookieSessionTests)\", \"test_get_expire_at_browser_close (sessions_tests.tests.CookieSessionTests)\", \"test_has_key (sessions_tests.tests.CookieSessionTests)\", \"test_invalid_key (sessions_tests.tests.CookieSessionTests)\", \"test_items (sessions_tests.tests.CookieSessionTests)\", \"test_keys (sessions_tests.tests.CookieSessionTests)\", \"test_new_session (sessions_tests.tests.CookieSessionTests)\", \"test_pop (sessions_tests.tests.CookieSessionTests)\", \"test_pop_default (sessions_tests.tests.CookieSessionTests)\", \"test_pop_default_named_argument (sessions_tests.tests.CookieSessionTests)\", \"test_pop_no_default_keyerror_raised (sessions_tests.tests.CookieSessionTests)\", \"test_save (sessions_tests.tests.CookieSessionTests)\", \"test_save_doesnt_clear_data (sessions_tests.tests.CookieSessionTests)\", \"Falsey values (Such as an empty string) are rejected.\", \"test_session_key_is_read_only (sessions_tests.tests.CookieSessionTests)\", \"Strings shorter than 8 characters are rejected.\", \"Strings of length 8 and up are accepted and stored.\", \"test_setdefault (sessions_tests.tests.CookieSessionTests)\", \"test_store (sessions_tests.tests.CookieSessionTests)\", \"test_unpickling_exception (sessions_tests.tests.CookieSessionTests)\", \"test_update (sessions_tests.tests.CookieSessionTests)\", \"test_values (sessions_tests.tests.CookieSessionTests)\", \"test_actual_expiry (sessions_tests.tests.CacheSessionTests)\", \"test_clear (sessions_tests.tests.CacheSessionTests)\", \"test_create_and_save (sessions_tests.tests.CacheSessionTests)\", \"test_custom_expiry_datetime (sessions_tests.tests.CacheSessionTests)\", \"test_custom_expiry_reset (sessions_tests.tests.CacheSessionTests)\", \"test_custom_expiry_seconds (sessions_tests.tests.CacheSessionTests)\", \"test_custom_expiry_timedelta (sessions_tests.tests.CacheSessionTests)\", \"test_cycle (sessions_tests.tests.CacheSessionTests)\", \"test_cycle_with_no_session_cache (sessions_tests.tests.CacheSessionTests)\", \"test_decode (sessions_tests.tests.CacheSessionTests)\", \"test_decode_failure_logged_to_security (sessions_tests.tests.CacheSessionTests)\", \"test_decode_legacy (sessions_tests.tests.CacheSessionTests)\", \"test_default_cache (sessions_tests.tests.CacheSessionTests)\", \"test_default_expiry (sessions_tests.tests.CacheSessionTests)\", \"test_default_hashing_algorith_legacy_decode (sessions_tests.tests.CacheSessionTests)\", \"test_delete (sessions_tests.tests.CacheSessionTests)\", \"test_flush (sessions_tests.tests.CacheSessionTests)\", \"test_get_empty (sessions_tests.tests.CacheSessionTests)\", \"test_get_expire_at_browser_close (sessions_tests.tests.CacheSessionTests)\", \"test_has_key (sessions_tests.tests.CacheSessionTests)\", \"test_invalid_key (sessions_tests.tests.CacheSessionTests)\", \"test_items (sessions_tests.tests.CacheSessionTests)\", \"test_keys (sessions_tests.tests.CacheSessionTests)\", \"test_load_overlong_key (sessions_tests.tests.CacheSessionTests)\", \"test_new_session (sessions_tests.tests.CacheSessionTests)\", \"test_non_default_cache (sessions_tests.tests.CacheSessionTests)\", \"test_pop (sessions_tests.tests.CacheSessionTests)\", \"test_pop_default (sessions_tests.tests.CacheSessionTests)\", \"test_pop_default_named_argument (sessions_tests.tests.CacheSessionTests)\", \"test_pop_no_default_keyerror_raised (sessions_tests.tests.CacheSessionTests)\", \"test_save (sessions_tests.tests.CacheSessionTests)\", 
\"test_save_doesnt_clear_data (sessions_tests.tests.CacheSessionTests)\", \"test_session_key_is_read_only (sessions_tests.tests.CacheSessionTests)\", \"test_session_load_does_not_create_record (sessions_tests.tests.CacheSessionTests)\", \"test_session_save_does_not_resurrect_session_logged_out_in_other_context (sessions_tests.tests.CacheSessionTests)\", \"test_setdefault (sessions_tests.tests.CacheSessionTests)\", \"test_store (sessions_tests.tests.CacheSessionTests)\", \"test_update (sessions_tests.tests.CacheSessionTests)\", \"test_values (sessions_tests.tests.CacheSessionTests)\", \"test_empty_session_saved (sessions_tests.tests.SessionMiddlewareTests)\", \"test_flush_empty_without_session_cookie_doesnt_set_cookie (sessions_tests.tests.SessionMiddlewareTests)\", \"test_httponly_session_cookie (sessions_tests.tests.SessionMiddlewareTests)\", \"test_no_httponly_session_cookie (sessions_tests.tests.SessionMiddlewareTests)\", \"test_samesite_session_cookie (sessions_tests.tests.SessionMiddlewareTests)\", \"test_secure_session_cookie (sessions_tests.tests.SessionMiddlewareTests)\", \"test_session_delete_on_end (sessions_tests.tests.SessionMiddlewareTests)\", \"test_session_delete_on_end_with_custom_domain_and_path (sessions_tests.tests.SessionMiddlewareTests)\", \"test_session_save_on_500 (sessions_tests.tests.SessionMiddlewareTests)\", \"test_session_update_error_redirect (sessions_tests.tests.SessionMiddlewareTests)\", \"test_actual_expiry (sessions_tests.tests.FileSessionPathLibTests)\", \"test_clear (sessions_tests.tests.FileSessionPathLibTests)\", \"test_clearsessions_command (sessions_tests.tests.FileSessionPathLibTests)\", \"test_configuration_check (sessions_tests.tests.FileSessionPathLibTests)\", \"test_custom_expiry_datetime (sessions_tests.tests.FileSessionPathLibTests)\", \"test_custom_expiry_reset (sessions_tests.tests.FileSessionPathLibTests)\", \"test_custom_expiry_seconds (sessions_tests.tests.FileSessionPathLibTests)\", \"test_custom_expiry_timedelta (sessions_tests.tests.FileSessionPathLibTests)\", \"test_cycle (sessions_tests.tests.FileSessionPathLibTests)\", \"test_cycle_with_no_session_cache (sessions_tests.tests.FileSessionPathLibTests)\", \"test_decode (sessions_tests.tests.FileSessionPathLibTests)\", \"test_decode_failure_logged_to_security (sessions_tests.tests.FileSessionPathLibTests)\", \"test_decode_legacy (sessions_tests.tests.FileSessionPathLibTests)\", \"test_default_expiry (sessions_tests.tests.FileSessionPathLibTests)\", \"test_default_hashing_algorith_legacy_decode (sessions_tests.tests.FileSessionPathLibTests)\", \"test_delete (sessions_tests.tests.FileSessionPathLibTests)\", \"test_flush (sessions_tests.tests.FileSessionPathLibTests)\", \"test_get_empty (sessions_tests.tests.FileSessionPathLibTests)\", \"test_get_expire_at_browser_close (sessions_tests.tests.FileSessionPathLibTests)\", \"test_has_key (sessions_tests.tests.FileSessionPathLibTests)\", \"test_invalid_key (sessions_tests.tests.FileSessionPathLibTests)\", \"test_invalid_key_backslash (sessions_tests.tests.FileSessionPathLibTests)\", \"test_invalid_key_forwardslash (sessions_tests.tests.FileSessionPathLibTests)\", \"test_items (sessions_tests.tests.FileSessionPathLibTests)\", \"test_keys (sessions_tests.tests.FileSessionPathLibTests)\", \"test_new_session (sessions_tests.tests.FileSessionPathLibTests)\", \"test_pop (sessions_tests.tests.FileSessionPathLibTests)\", \"test_pop_default (sessions_tests.tests.FileSessionPathLibTests)\", \"test_pop_default_named_argument 
(sessions_tests.tests.FileSessionPathLibTests)\", \"test_pop_no_default_keyerror_raised (sessions_tests.tests.FileSessionPathLibTests)\", \"test_save (sessions_tests.tests.FileSessionPathLibTests)\", \"test_save_doesnt_clear_data (sessions_tests.tests.FileSessionPathLibTests)\", \"test_session_key_is_read_only (sessions_tests.tests.FileSessionPathLibTests)\", \"test_session_load_does_not_create_record (sessions_tests.tests.FileSessionPathLibTests)\", \"test_session_save_does_not_resurrect_session_logged_out_in_other_context (sessions_tests.tests.FileSessionPathLibTests)\", \"test_setdefault (sessions_tests.tests.FileSessionPathLibTests)\", \"test_store (sessions_tests.tests.FileSessionPathLibTests)\", \"test_update (sessions_tests.tests.FileSessionPathLibTests)\", \"test_values (sessions_tests.tests.FileSessionPathLibTests)\", \"test_actual_expiry (sessions_tests.tests.FileSessionTests)\", \"test_clear (sessions_tests.tests.FileSessionTests)\", \"test_clearsessions_command (sessions_tests.tests.FileSessionTests)\", \"test_configuration_check (sessions_tests.tests.FileSessionTests)\", \"test_custom_expiry_datetime (sessions_tests.tests.FileSessionTests)\", \"test_custom_expiry_reset (sessions_tests.tests.FileSessionTests)\", \"test_custom_expiry_seconds (sessions_tests.tests.FileSessionTests)\", \"test_custom_expiry_timedelta (sessions_tests.tests.FileSessionTests)\", \"test_cycle (sessions_tests.tests.FileSessionTests)\", \"test_cycle_with_no_session_cache (sessions_tests.tests.FileSessionTests)\", \"test_decode (sessions_tests.tests.FileSessionTests)\", \"test_decode_failure_logged_to_security (sessions_tests.tests.FileSessionTests)\", \"test_decode_legacy (sessions_tests.tests.FileSessionTests)\", \"test_default_expiry (sessions_tests.tests.FileSessionTests)\", \"test_default_hashing_algorith_legacy_decode (sessions_tests.tests.FileSessionTests)\", \"test_delete (sessions_tests.tests.FileSessionTests)\", \"test_flush (sessions_tests.tests.FileSessionTests)\", \"test_get_empty (sessions_tests.tests.FileSessionTests)\", \"test_get_expire_at_browser_close (sessions_tests.tests.FileSessionTests)\", \"test_has_key (sessions_tests.tests.FileSessionTests)\", \"test_invalid_key (sessions_tests.tests.FileSessionTests)\", \"test_invalid_key_backslash (sessions_tests.tests.FileSessionTests)\", \"test_invalid_key_forwardslash (sessions_tests.tests.FileSessionTests)\", \"test_items (sessions_tests.tests.FileSessionTests)\", \"test_keys (sessions_tests.tests.FileSessionTests)\", \"test_new_session (sessions_tests.tests.FileSessionTests)\", \"test_pop (sessions_tests.tests.FileSessionTests)\", \"test_pop_default (sessions_tests.tests.FileSessionTests)\", \"test_pop_default_named_argument (sessions_tests.tests.FileSessionTests)\", \"test_pop_no_default_keyerror_raised (sessions_tests.tests.FileSessionTests)\", \"test_save (sessions_tests.tests.FileSessionTests)\", \"test_save_doesnt_clear_data (sessions_tests.tests.FileSessionTests)\", \"test_session_key_is_read_only (sessions_tests.tests.FileSessionTests)\", \"test_session_load_does_not_create_record (sessions_tests.tests.FileSessionTests)\", \"test_session_save_does_not_resurrect_session_logged_out_in_other_context (sessions_tests.tests.FileSessionTests)\", \"test_setdefault (sessions_tests.tests.FileSessionTests)\", \"test_store (sessions_tests.tests.FileSessionTests)\", \"test_update (sessions_tests.tests.FileSessionTests)\", \"test_values (sessions_tests.tests.FileSessionTests)\", \"test_actual_expiry (sessions_tests.tests.DatabaseSessionTests)\", 
\"test_clear (sessions_tests.tests.DatabaseSessionTests)\", \"test_clearsessions_command (sessions_tests.tests.DatabaseSessionTests)\", \"test_custom_expiry_datetime (sessions_tests.tests.DatabaseSessionTests)\", \"test_custom_expiry_reset (sessions_tests.tests.DatabaseSessionTests)\", \"test_custom_expiry_seconds (sessions_tests.tests.DatabaseSessionTests)\", \"test_custom_expiry_timedelta (sessions_tests.tests.DatabaseSessionTests)\", \"test_cycle (sessions_tests.tests.DatabaseSessionTests)\", \"test_cycle_with_no_session_cache (sessions_tests.tests.DatabaseSessionTests)\", \"test_decode (sessions_tests.tests.DatabaseSessionTests)\", \"test_decode_failure_logged_to_security (sessions_tests.tests.DatabaseSessionTests)\", \"test_decode_legacy (sessions_tests.tests.DatabaseSessionTests)\", \"test_default_expiry (sessions_tests.tests.DatabaseSessionTests)\", \"test_default_hashing_algorith_legacy_decode (sessions_tests.tests.DatabaseSessionTests)\", \"test_delete (sessions_tests.tests.DatabaseSessionTests)\", \"test_flush (sessions_tests.tests.DatabaseSessionTests)\", \"test_get_empty (sessions_tests.tests.DatabaseSessionTests)\", \"test_get_expire_at_browser_close (sessions_tests.tests.DatabaseSessionTests)\", \"test_has_key (sessions_tests.tests.DatabaseSessionTests)\", \"test_invalid_key (sessions_tests.tests.DatabaseSessionTests)\", \"test_items (sessions_tests.tests.DatabaseSessionTests)\", \"test_keys (sessions_tests.tests.DatabaseSessionTests)\", \"test_new_session (sessions_tests.tests.DatabaseSessionTests)\", \"test_pop (sessions_tests.tests.DatabaseSessionTests)\", \"test_pop_default (sessions_tests.tests.DatabaseSessionTests)\", \"test_pop_default_named_argument (sessions_tests.tests.DatabaseSessionTests)\", \"test_pop_no_default_keyerror_raised (sessions_tests.tests.DatabaseSessionTests)\", \"test_save (sessions_tests.tests.DatabaseSessionTests)\", \"test_save_doesnt_clear_data (sessions_tests.tests.DatabaseSessionTests)\", \"test_session_get_decoded (sessions_tests.tests.DatabaseSessionTests)\", \"test_session_key_is_read_only (sessions_tests.tests.DatabaseSessionTests)\", \"test_session_load_does_not_create_record (sessions_tests.tests.DatabaseSessionTests)\", \"test_session_save_does_not_resurrect_session_logged_out_in_other_context (sessions_tests.tests.DatabaseSessionTests)\", \"Session repr should be the session key.\", \"test_sessionmanager_save (sessions_tests.tests.DatabaseSessionTests)\", \"test_setdefault (sessions_tests.tests.DatabaseSessionTests)\", \"test_store (sessions_tests.tests.DatabaseSessionTests)\", \"test_update (sessions_tests.tests.DatabaseSessionTests)\", \"test_values (sessions_tests.tests.DatabaseSessionTests)\", \"test_actual_expiry (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_clear (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_clearsessions_command (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_custom_expiry_datetime (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_custom_expiry_reset (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_custom_expiry_seconds (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_custom_expiry_timedelta (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_cycle (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_cycle_with_no_session_cache (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_decode (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_decode_failure_logged_to_security 
(sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_decode_legacy (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_default_expiry (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_default_hashing_algorith_legacy_decode (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_delete (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_extra_session_field (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_flush (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_get_empty (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_get_expire_at_browser_close (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_has_key (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_invalid_key (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_items (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_keys (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_new_session (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_pop (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_pop_default (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_pop_default_named_argument (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_pop_no_default_keyerror_raised (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_save (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_save_doesnt_clear_data (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_session_get_decoded (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_session_key_is_read_only (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_session_load_does_not_create_record (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_session_save_does_not_resurrect_session_logged_out_in_other_context (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_sessionmanager_save (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_setdefault (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_store (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_update (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_values (sessions_tests.tests.CustomDatabaseSessionTests)\", \"test_actual_expiry (sessions_tests.tests.CacheDBSessionTests)\", \"test_clear (sessions_tests.tests.CacheDBSessionTests)\", \"test_custom_expiry_datetime (sessions_tests.tests.CacheDBSessionTests)\", \"test_custom_expiry_reset (sessions_tests.tests.CacheDBSessionTests)\", \"test_custom_expiry_seconds (sessions_tests.tests.CacheDBSessionTests)\", \"test_custom_expiry_timedelta (sessions_tests.tests.CacheDBSessionTests)\", \"test_cycle (sessions_tests.tests.CacheDBSessionTests)\", \"test_cycle_with_no_session_cache (sessions_tests.tests.CacheDBSessionTests)\", \"test_decode (sessions_tests.tests.CacheDBSessionTests)\", \"test_decode_failure_logged_to_security (sessions_tests.tests.CacheDBSessionTests)\", \"test_decode_legacy (sessions_tests.tests.CacheDBSessionTests)\", \"test_default_expiry (sessions_tests.tests.CacheDBSessionTests)\", \"test_default_hashing_algorith_legacy_decode (sessions_tests.tests.CacheDBSessionTests)\", \"test_delete (sessions_tests.tests.CacheDBSessionTests)\", \"test_exists_searches_cache_first (sessions_tests.tests.CacheDBSessionTests)\", \"test_flush (sessions_tests.tests.CacheDBSessionTests)\", \"test_get_empty (sessions_tests.tests.CacheDBSessionTests)\", \"test_get_expire_at_browser_close (sessions_tests.tests.CacheDBSessionTests)\", \"test_has_key 
(sessions_tests.tests.CacheDBSessionTests)\", \"test_invalid_key (sessions_tests.tests.CacheDBSessionTests)\", \"test_items (sessions_tests.tests.CacheDBSessionTests)\", \"test_keys (sessions_tests.tests.CacheDBSessionTests)\", \"test_load_overlong_key (sessions_tests.tests.CacheDBSessionTests)\", \"test_new_session (sessions_tests.tests.CacheDBSessionTests)\", \"test_non_default_cache (sessions_tests.tests.CacheDBSessionTests)\", \"test_pop (sessions_tests.tests.CacheDBSessionTests)\", \"test_pop_default (sessions_tests.tests.CacheDBSessionTests)\", \"test_pop_default_named_argument (sessions_tests.tests.CacheDBSessionTests)\", \"test_pop_no_default_keyerror_raised (sessions_tests.tests.CacheDBSessionTests)\", \"test_save (sessions_tests.tests.CacheDBSessionTests)\", \"test_save_doesnt_clear_data (sessions_tests.tests.CacheDBSessionTests)\", \"test_session_key_is_read_only (sessions_tests.tests.CacheDBSessionTests)\", \"test_session_load_does_not_create_record (sessions_tests.tests.CacheDBSessionTests)\", \"test_session_save_does_not_resurrect_session_logged_out_in_other_context (sessions_tests.tests.CacheDBSessionTests)\", \"test_setdefault (sessions_tests.tests.CacheDBSessionTests)\", \"test_store (sessions_tests.tests.CacheDBSessionTests)\", \"test_update (sessions_tests.tests.CacheDBSessionTests)\", \"test_values (sessions_tests.tests.CacheDBSessionTests)\", \"test_actual_expiry (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_clear (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_custom_expiry_datetime (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_custom_expiry_reset (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_custom_expiry_seconds (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_custom_expiry_timedelta (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_cycle (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_cycle_with_no_session_cache (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_decode (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_decode_failure_logged_to_security (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_decode_legacy (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_default_expiry (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_default_hashing_algorith_legacy_decode (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_delete (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_exists_searches_cache_first (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_flush (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_get_empty (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_get_expire_at_browser_close (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_has_key (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_invalid_key (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_items (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_keys (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_load_overlong_key (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_new_session (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_non_default_cache (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_pop (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_pop_default 
(sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_pop_default_named_argument (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_pop_no_default_keyerror_raised (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_save (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_save_doesnt_clear_data (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_session_key_is_read_only (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_session_load_does_not_create_record (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_session_save_does_not_resurrect_session_logged_out_in_other_context (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_setdefault (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_store (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_update (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_values (sessions_tests.tests.CacheDBSessionWithTimeZoneTests)\", \"test_actual_expiry (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_clear (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_clearsessions_command (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_custom_expiry_datetime (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_custom_expiry_reset (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_custom_expiry_seconds (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_custom_expiry_timedelta (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_cycle (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_cycle_with_no_session_cache (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_decode (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_decode_failure_logged_to_security (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_decode_legacy (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_default_expiry (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_default_hashing_algorith_legacy_decode (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_delete (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_flush (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_get_empty (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_get_expire_at_browser_close (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_has_key (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_invalid_key (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_items (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_keys (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_new_session (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_pop (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_pop_default (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_pop_default_named_argument (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_pop_no_default_keyerror_raised (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_save (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_save_doesnt_clear_data (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_session_get_decoded (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_session_key_is_read_only 
(sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_session_load_does_not_create_record (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_session_save_does_not_resurrect_session_logged_out_in_other_context (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_sessionmanager_save (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_setdefault (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_store (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_update (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\", \"test_values (sessions_tests.tests.DatabaseSessionWithTimeZoneTests)\"]", + "pass_to_pass": "[]", + "expected_spans": { + "django/contrib/sessions/backends/base.py": [ + "SessionBase.decode" + ] + }, + "test_file_spans": { + "tests/sessions_tests/tests.py": [ + "SessionTestsMixin.test_decode_failure_logged_to_security" + ] + }, + "resolved_by": [ + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/contrib/sessions/backends/base.py": [ + "imports", + "SessionBase", + "SessionBase.__getitem__", + "SessionBase._hash", + "SessionBase.encode", + "SessionBase.decode", + "SessionBase._legacy_encode", + "SessionBase._legacy_decode", + "SessionBase.clear", + "SessionBase.is_empty", + "SessionBase._get_new_session_key", + "SessionBase._set_session_key", + "SessionBase._get_session", + "SessionBase.get_expiry_age", + "SessionBase.get_expiry_date", + "SessionBase.set_expiry", + "SessionBase.cycle_key", + "SessionBase.exists", + "SessionBase.create", + "SessionBase.save", + "SessionBase.delete", + "SessionBase.load", + "SessionBase.clear_expired" + ] + }, + "alternative_spans": { + "django/contrib/sessions/backends/base.py": [ + "imports", + "SessionBase", + "SessionBase.__getitem__", + "SessionBase._hash", + "SessionBase.encode", + "SessionBase.decode", + "SessionBase._legacy_encode", + "SessionBase._legacy_decode", + "SessionBase.clear", + "SessionBase.is_empty", + "SessionBase._get_new_session_key", + "SessionBase._set_session_key", + "SessionBase._get_session", + "SessionBase.get_expiry_age", + "SessionBase.get_expiry_date", + "SessionBase.set_expiry", + "SessionBase.cycle_key", + "SessionBase.exists", + "SessionBase.create", + "SessionBase.save", + "SessionBase.delete", + "SessionBase.load", + "SessionBase.clear_expired" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-20049", + "repo": "sympy/sympy", + "base_commit": "d57aaf064041fe52c0fa357639b069100f8b28e1", + "problem_statement": "Point.vel() should calculate the velocity if possible\nIf you specify the orientation of two reference frames and then ask for the angular velocity between the two reference frames the angular velocity will be calculated. But if you try to do the same thing with velocities, this doesn't work. 
See below:\r\n\r\n```\r\nIn [1]: import sympy as sm \r\n\r\nIn [2]: import sympy.physics.mechanics as me \r\n\r\nIn [3]: A = me.ReferenceFrame('A') \r\n\r\nIn [5]: q = me.dynamicsymbols('q') \r\n\r\nIn [6]: B = A.orientnew('B', 'Axis', (q, A.x)) \r\n\r\nIn [7]: B.ang_vel_in(A) \r\nOut[7]: q'*A.x\r\n\r\nIn [9]: P = me.Point('P') \r\n\r\nIn [10]: Q = me.Point('Q') \r\n\r\nIn [11]: r = q*A.x + 2*q*A.y \r\n\r\nIn [12]: Q.set_pos(P, r) \r\n\r\nIn [13]: Q.vel(A) \r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n in \r\n----> 1 Q.vel(A)\r\n\r\n~/miniconda3/lib/python3.6/site-packages/sympy/physics/vector/point.py in vel(self, frame)\r\n 453 if not (frame in self._vel_dict):\r\n 454 raise ValueError('Velocity of point ' + self.name + ' has not been'\r\n--> 455 ' defined in ReferenceFrame ' + frame.name)\r\n 456 return self._vel_dict[frame]\r\n 457 \r\n\r\nValueError: Velocity of point Q has not been defined in ReferenceFrame A\r\n```\r\n\r\nThe expected result of the `Q.vel(A)` should be:\r\n\r\n```\r\nIn [14]: r.dt(A) \r\nOut[14]: q'*A.x + 2*q'*A.y\r\n```\r\n\r\nI think that this is possible. Maybe there is a reason it isn't implemented. But we should try to implement it because it is confusing why this works for orientations and not positions.\r\n\r\n\n", + "golden_patch": "diff --git a/sympy/physics/vector/point.py b/sympy/physics/vector/point.py\n--- a/sympy/physics/vector/point.py\n+++ b/sympy/physics/vector/point.py\n@@ -483,19 +483,49 @@ def vel(self, frame):\n Examples\n ========\n \n- >>> from sympy.physics.vector import Point, ReferenceFrame\n+ >>> from sympy.physics.vector import Point, ReferenceFrame, dynamicsymbols\n >>> N = ReferenceFrame('N')\n >>> p1 = Point('p1')\n >>> p1.set_vel(N, 10 * N.x)\n >>> p1.vel(N)\n 10*N.x\n \n+ Velocities will be automatically calculated if possible, otherwise a ``ValueError`` will be returned. If it is possible to calculate multiple different velocities from the relative points, the points defined most directly relative to this point will be used. In the case of inconsistent relative positions of points, incorrect velocities may be returned. 
It is up to the user to define prior relative positions and velocities of points in a self-consistent way.\n+\n+ >>> p = Point('p')\n+ >>> q = dynamicsymbols('q')\n+ >>> p.set_vel(N, 10 * N.x)\n+ >>> p2 = Point('p2')\n+ >>> p2.set_pos(p, q*N.x)\n+ >>> p2.vel(N)\n+ (Derivative(q(t), t) + 10)*N.x\n+\n \"\"\"\n \n _check_frame(frame)\n if not (frame in self._vel_dict):\n- raise ValueError('Velocity of point ' + self.name + ' has not been'\n+ visited = []\n+ queue = [self]\n+ while queue: #BFS to find nearest point\n+ node = queue.pop(0)\n+ if node not in visited:\n+ visited.append(node)\n+ for neighbor, neighbor_pos in node._pos_dict.items():\n+ try:\n+ neighbor_pos.express(frame) #Checks if pos vector is valid\n+ except ValueError:\n+ continue\n+ try :\n+ neighbor_velocity = neighbor._vel_dict[frame] #Checks if point has its vel defined in req frame\n+ except KeyError:\n+ queue.append(neighbor)\n+ continue\n+ self.set_vel(frame, self.pos_from(neighbor).dt(frame) + neighbor_velocity)\n+ return self._vel_dict[frame]\n+ else:\n+ raise ValueError('Velocity of point ' + self.name + ' has not been'\n ' defined in ReferenceFrame ' + frame.name)\n+\n return self._vel_dict[frame]\n \n def partial_velocity(self, frame, *gen_speeds):\n", + "test_patch": "diff --git a/sympy/physics/vector/tests/test_point.py b/sympy/physics/vector/tests/test_point.py\n--- a/sympy/physics/vector/tests/test_point.py\n+++ b/sympy/physics/vector/tests/test_point.py\n@@ -126,3 +126,107 @@ def test_point_partial_velocity():\n assert p.partial_velocity(N, u1) == A.x\n assert p.partial_velocity(N, u1, u2) == (A.x, N.y)\n raises(ValueError, lambda: p.partial_velocity(A, u1))\n+\n+def test_point_vel(): #Basic functionality\n+ q1, q2 = dynamicsymbols('q1 q2')\n+ N = ReferenceFrame('N')\n+ B = ReferenceFrame('B')\n+ Q = Point('Q')\n+ O = Point('O')\n+ Q.set_pos(O, q1 * N.x)\n+ raises(ValueError , lambda: Q.vel(N)) # Velocity of O in N is not defined\n+ O.set_vel(N, q2 * N.y)\n+ assert O.vel(N) == q2 * N.y\n+ raises(ValueError , lambda : O.vel(B)) #Velocity of O is not defined in B\n+\n+def test_auto_point_vel():\n+ t = dynamicsymbols._t\n+ q1, q2 = dynamicsymbols('q1 q2')\n+ N = ReferenceFrame('N')\n+ B = ReferenceFrame('B')\n+ O = Point('O')\n+ Q = Point('Q')\n+ Q.set_pos(O, q1 * N.x)\n+ O.set_vel(N, q2 * N.y)\n+ assert Q.vel(N) == q1.diff(t) * N.x + q2 * N.y # Velocity of Q using O\n+ P1 = Point('P1')\n+ P1.set_pos(O, q1 * B.x)\n+ P2 = Point('P2')\n+ P2.set_pos(P1, q2 * B.z)\n+ raises(ValueError, lambda : P2.vel(B)) # O's velocity is defined in different frame, and no\n+ #point in between has its velocity defined\n+ raises(ValueError, lambda: P2.vel(N)) # Velocity of O not defined in N\n+\n+def test_auto_point_vel_multiple_point_path():\n+ t = dynamicsymbols._t\n+ q1, q2 = dynamicsymbols('q1 q2')\n+ B = ReferenceFrame('B')\n+ P = Point('P')\n+ P.set_vel(B, q1 * B.x)\n+ P1 = Point('P1')\n+ P1.set_pos(P, q2 * B.y)\n+ P1.set_vel(B, q1 * B.z)\n+ P2 = Point('P2')\n+ P2.set_pos(P1, q1 * B.z)\n+ P3 = Point('P3')\n+ P3.set_pos(P2, 10 * q1 * B.y)\n+ assert P3.vel(B) == 10 * q1.diff(t) * B.y + (q1 + q1.diff(t)) * B.z\n+\n+def test_auto_vel_dont_overwrite():\n+ t = dynamicsymbols._t\n+ q1, q2, u1 = dynamicsymbols('q1, q2, u1')\n+ N = ReferenceFrame('N')\n+ P = Point('P1')\n+ P.set_vel(N, u1 * N.x)\n+ P1 = Point('P1')\n+ P1.set_pos(P, q2 * N.y)\n+ assert P1.vel(N) == q2.diff(t) * N.y + u1 * N.x\n+ assert P.vel(N) == u1 * N.x\n+ P1.set_vel(N, u1 * N.z)\n+ assert P1.vel(N) == u1 * N.z\n+\n+def 
test_auto_point_vel_if_tree_has_vel_but_inappropriate_pos_vector():\n+ q1, q2 = dynamicsymbols('q1 q2')\n+ B = ReferenceFrame('B')\n+ S = ReferenceFrame('S')\n+ P = Point('P')\n+ P.set_vel(B, q1 * B.x)\n+ P1 = Point('P1')\n+ P1.set_pos(P, S.y)\n+ raises(ValueError, lambda : P1.vel(B)) # P1.pos_from(P) can't be expressed in B\n+ raises(ValueError, lambda : P1.vel(S)) # P.vel(S) not defined\n+\n+def test_auto_point_vel_shortest_path():\n+ t = dynamicsymbols._t\n+ q1, q2, u1, u2 = dynamicsymbols('q1 q2 u1 u2')\n+ B = ReferenceFrame('B')\n+ P = Point('P')\n+ P.set_vel(B, u1 * B.x)\n+ P1 = Point('P1')\n+ P1.set_pos(P, q2 * B.y)\n+ P1.set_vel(B, q1 * B.z)\n+ P2 = Point('P2')\n+ P2.set_pos(P1, q1 * B.z)\n+ P3 = Point('P3')\n+ P3.set_pos(P2, 10 * q1 * B.y)\n+ P4 = Point('P4')\n+ P4.set_pos(P3, q1 * B.x)\n+ O = Point('O')\n+ O.set_vel(B, u2 * B.y)\n+ O1 = Point('O1')\n+ O1.set_pos(O, q2 * B.z)\n+ P4.set_pos(O1, q1 * B.x + q2 * B.z)\n+ assert P4.vel(B) == q1.diff(t) * B.x + u2 * B.y + 2 * q2.diff(t) * B.z\n+\n+def test_auto_point_vel_connected_frames():\n+ t = dynamicsymbols._t\n+ q, q1, q2, u = dynamicsymbols('q q1 q2 u')\n+ N = ReferenceFrame('N')\n+ B = ReferenceFrame('B')\n+ O = Point('O')\n+ O.set_vel(N, u * N.x)\n+ P = Point('P')\n+ P.set_pos(O, q1 * N.x + q2 * B.y)\n+ raises(ValueError, lambda: P.vel(N))\n+ N.orient(B, 'Axis', (q, B.x))\n+ assert P.vel(N) == (u + q1.diff(t)) * N.x + q2.diff(t) * B.y - q2 * q.diff(t) * B.z\n", + "fail_to_pass": "[\"test_auto_point_vel\", \"test_auto_point_vel_multiple_point_path\", \"test_auto_vel_dont_overwrite\", \"test_auto_point_vel_shortest_path\"]", + "pass_to_pass": "[\"test_point_v1pt_theorys\", \"test_point_a1pt_theorys\", \"test_point_v2pt_theorys\", \"test_point_a2pt_theorys\", \"test_point_funcs\", \"test_point_pos\", \"test_point_partial_velocity\", \"test_point_vel\", \"test_auto_point_vel_if_tree_has_vel_but_inappropriate_pos_vector\"]", + "expected_spans": { + "sympy/physics/vector/point.py": [ + "Point.v2pt_theory" + ] + }, + "test_file_spans": { + "sympy/physics/vector/tests/test_point.py": [] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13401", + "repo": "django/django", + "base_commit": "453967477e3ddae704cd739eac2449c0e13d464c", + "problem_statement": "Abstract model field should not be equal across models\nDescription\n\t\nConsider the following models:\nclass A(models.Model):\n\tclass Meta:\n\t\tabstract = True\n\tmyfield = IntegerField()\nclass B(A):\n\tpass\nclass C(A):\n\tpass\nIf I pull the fields of B and C into a shared set, one will be de-duplicated away, because they compare as equal. I found this surprising, though in practice using a list was sufficient for my need. The root of the issue is that they compare equal, as fields only consider self.creation_counter when comparing for equality.\nlen({B._meta.get_field('myfield'), C._meta.get_field('myfield')}) == 1\nB._meta.get_field('myfield') == C._meta.get_field('myfield')\nWe should adjust __eq__ so that if the field.model is different, they will compare unequal. Similarly, it is probably wise to adjust __hash__ and __lt__ to match.\nWhen adjusting __lt__, it may be wise to order first by self.creation_counter so that cases not affected by this equality collision won't be re-ordered. 
In my experimental branch, there was one test that broke if I ordered them by model first.\nI brought this up on IRC django-dev to check my intuitions, and those conversing with me there seemed to agree that the current behavior is not intuitive.\n", + "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -516,17 +516,37 @@ def clone(self):\n def __eq__(self, other):\n # Needed for @total_ordering\n if isinstance(other, Field):\n- return self.creation_counter == other.creation_counter\n+ return (\n+ self.creation_counter == other.creation_counter and\n+ getattr(self, 'model', None) == getattr(other, 'model', None)\n+ )\n return NotImplemented\n \n def __lt__(self, other):\n # This is needed because bisect does not take a comparison function.\n+ # Order by creation_counter first for backward compatibility.\n if isinstance(other, Field):\n- return self.creation_counter < other.creation_counter\n+ if (\n+ self.creation_counter != other.creation_counter or\n+ not hasattr(self, 'model') and not hasattr(other, 'model')\n+ ):\n+ return self.creation_counter < other.creation_counter\n+ elif hasattr(self, 'model') != hasattr(other, 'model'):\n+ return not hasattr(self, 'model') # Order no-model fields first\n+ else:\n+ # creation_counter's are equal, compare only models.\n+ return (\n+ (self.model._meta.app_label, self.model._meta.model_name) <\n+ (other.model._meta.app_label, other.model._meta.model_name)\n+ )\n return NotImplemented\n \n def __hash__(self):\n- return hash(self.creation_counter)\n+ return hash((\n+ self.creation_counter,\n+ self.model._meta.app_label if hasattr(self, 'model') else None,\n+ self.model._meta.model_name if hasattr(self, 'model') else None,\n+ ))\n \n def __deepcopy__(self, memodict):\n # We don't have to deepcopy very much here, since most things are not\n", + "test_patch": "diff --git a/tests/model_fields/tests.py b/tests/model_fields/tests.py\n--- a/tests/model_fields/tests.py\n+++ b/tests/model_fields/tests.py\n@@ -102,6 +102,36 @@ def test_deconstruct_nested_field(self):\n name, path, args, kwargs = Nested.Field().deconstruct()\n self.assertEqual(path, 'model_fields.tests.Nested.Field')\n \n+ def test_abstract_inherited_fields(self):\n+ \"\"\"Field instances from abstract models are not equal.\"\"\"\n+ class AbstractModel(models.Model):\n+ field = models.IntegerField()\n+\n+ class Meta:\n+ abstract = True\n+\n+ class InheritAbstractModel1(AbstractModel):\n+ pass\n+\n+ class InheritAbstractModel2(AbstractModel):\n+ pass\n+\n+ abstract_model_field = AbstractModel._meta.get_field('field')\n+ inherit1_model_field = InheritAbstractModel1._meta.get_field('field')\n+ inherit2_model_field = InheritAbstractModel2._meta.get_field('field')\n+\n+ self.assertNotEqual(abstract_model_field, inherit1_model_field)\n+ self.assertNotEqual(abstract_model_field, inherit2_model_field)\n+ self.assertNotEqual(inherit1_model_field, inherit2_model_field)\n+\n+ self.assertLess(abstract_model_field, inherit1_model_field)\n+ self.assertLess(abstract_model_field, inherit2_model_field)\n+ self.assertLess(inherit1_model_field, inherit2_model_field)\n+\n+ self.assertNotEqual(hash(abstract_model_field), hash(inherit1_model_field))\n+ self.assertNotEqual(hash(abstract_model_field), hash(inherit2_model_field))\n+ self.assertNotEqual(hash(inherit1_model_field), hash(inherit2_model_field))\n+\n \n class ChoicesTests(SimpleTestCase):\n \n", + "fail_to_pass": 
"[\"Field instances from abstract models are not equal.\"]", + "pass_to_pass": "[\"test_blank_in_choices (model_fields.tests.GetChoicesTests)\", \"test_blank_in_grouped_choices (model_fields.tests.GetChoicesTests)\", \"test_empty_choices (model_fields.tests.GetChoicesTests)\", \"test_lazy_strings_not_evaluated (model_fields.tests.GetChoicesTests)\", \"test_get_choices (model_fields.tests.GetChoicesLimitChoicesToTests)\", \"test_get_choices_reverse_related_field (model_fields.tests.GetChoicesLimitChoicesToTests)\", \"test_choices_and_field_display (model_fields.tests.GetFieldDisplayTests)\", \"test_empty_iterator_choices (model_fields.tests.GetFieldDisplayTests)\", \"A translated display value is coerced to str.\", \"test_iterator_choices (model_fields.tests.GetFieldDisplayTests)\", \"test_overriding_FIELD_display (model_fields.tests.GetFieldDisplayTests)\", \"test_overriding_inherited_FIELD_display (model_fields.tests.GetFieldDisplayTests)\", \"Can supply a custom choices form class to Field.formfield()\", \"deconstruct() uses __qualname__ for nested class support.\", \"Field instances can be pickled.\", \"test_field_name (model_fields.tests.BasicFieldTests)\", \"Fields are ordered based on their creation.\", \"test_field_repr (model_fields.tests.BasicFieldTests)\", \"__repr__() uses __qualname__ for nested class support.\", \"test_field_str (model_fields.tests.BasicFieldTests)\", \"test_field_verbose_name (model_fields.tests.BasicFieldTests)\", \"Field.formfield() sets disabled for fields with choices.\", \"test_show_hidden_initial (model_fields.tests.BasicFieldTests)\", \"test_get_choices (model_fields.tests.GetChoicesOrderingTests)\", \"test_get_choices_default_ordering (model_fields.tests.GetChoicesOrderingTests)\", \"test_get_choices_reverse_related_field (model_fields.tests.GetChoicesOrderingTests)\", \"test_get_choices_reverse_related_field_default_ordering (model_fields.tests.GetChoicesOrderingTests)\", \"test_check (model_fields.tests.ChoicesTests)\", \"test_choices (model_fields.tests.ChoicesTests)\", \"test_flatchoices (model_fields.tests.ChoicesTests)\", \"test_formfield (model_fields.tests.ChoicesTests)\", \"test_invalid_choice (model_fields.tests.ChoicesTests)\"]", + "expected_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "test_file_spans": { + "tests/model_fields/tests.py": [ + "BasicFieldTests" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": 
{ + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__", + "Field" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__", + "Field" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field", + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field", + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + 
"Field.__hash__" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "Field.__eq__", + "Field.__lt__", + "Field.__hash__" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13447", + "repo": "django/django", + "base_commit": "0456d3e42795481a186db05719300691fe2a1029", + "problem_statement": "Added model class to app_list context\nDescription\n\t \n\t\t(last modified by Raffaele Salmaso)\n\t \nI need to manipulate the app_list in my custom admin view, and the easiest way to get the result is to have access to the model class (currently the dictionary is a serialized model).\nIn addition I would make the _build_app_dict method public, as it is used by the two views index and app_index.\n", + "golden_patch": "diff --git a/django/contrib/admin/sites.py b/django/contrib/admin/sites.py\n--- a/django/contrib/admin/sites.py\n+++ b/django/contrib/admin/sites.py\n@@ -461,6 +461,7 @@ def _build_app_dict(self, request, label=None):\n \n info = (app_label, model._meta.model_name)\n model_dict = {\n+ 'model': model,\n 'name': capfirst(model._meta.verbose_name_plural),\n 'object_name': model._meta.object_name,\n 'perms': perms,\n", + "test_patch": "diff --git a/tests/admin_views/test_adminsite.py b/tests/admin_views/test_adminsite.py\n--- a/tests/admin_views/test_adminsite.py\n+++ b/tests/admin_views/test_adminsite.py\n@@ -55,7 +55,9 @@ def test_available_apps(self):\n admin_views = apps[0]\n self.assertEqual(admin_views['app_label'], 'admin_views')\n self.assertEqual(len(admin_views['models']), 1)\n- self.assertEqual(admin_views['models'][0]['object_name'], 'Article')\n+ article = admin_views['models'][0]\n+ self.assertEqual(article['object_name'], 'Article')\n+ self.assertEqual(article['model'], Article)\n \n # auth.User\n auth = apps[1]\n@@ -63,6 +65,7 @@ def test_available_apps(self):\n self.assertEqual(len(auth['models']), 1)\n user = auth['models'][0]\n self.assertEqual(user['object_name'], 'User')\n+ self.assertEqual(user['model'], User)\n \n self.assertEqual(auth['app_url'], '/test_admin/admin/auth/')\n self.assertIs(auth['has_module_perms'], True)\n", + "fail_to_pass": "[\"test_available_apps (admin_views.test_adminsite.SiteEachContextTest)\"]", + "pass_to_pass": "[\"test_add_action (admin_views.test_adminsite.SiteActionsTests)\", \"test_disable_action (admin_views.test_adminsite.SiteActionsTests)\", \"AdminSite.get_action() returns an action even if it's disabled.\", \"test_each_context (admin_views.test_adminsite.SiteEachContextTest)\", \"test_each_context_site_url_with_script_name (admin_views.test_adminsite.SiteEachContextTest)\"]", + "expected_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict" + ] + 
}, + "test_file_spans": { + "tests/admin_views/test_adminsite.py": [ + "SiteEachContextTest.test_available_apps" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.index" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.index", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.index", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.index", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.index", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240402_rag_gpt4", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + 
"django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + 
"AdminSite._build_app_dict", + "AdminSite.get_app_list" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + }, + "alternative_spans": { + "django/contrib/admin/sites.py": [ + "AdminSite._build_app_dict", + "AdminSite.get_app_list", + "AdminSite.app_index" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13448", + "repo": "django/django", + "base_commit": "7b9596b974fb0ad1868b10c8c2174e10b72be403", + "problem_statement": "Test runner setup_databases crashes with \"TEST\": {\"MIGRATE\": False}.\nDescription\n\t\nI'm trying to upgrade a project from Django 3.0 to Django 3.1 and wanted to try out the new \"TEST\": {\"MIGRATE\": False} database setting.\nSadly I'm running into an issue immediately when running ./manage.py test.\nRemoving the \"TEST\": {\"MIGRATE\": False} line allows the tests to run. So this is not blocking the upgrade for us, but it would be nice if we were able to use the new feature to skip migrations during testing.\nFor reference, this project was recently upgraded from Django 1.4 all the way to 3.0 so there might be some legacy cruft somewhere that triggers this.\nHere's the trackeback. 
I'll try to debug this some more.\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py\", line 84, in _execute\n\treturn self.cursor.execute(sql, params)\npsycopg2.errors.UndefinedTable: relation \"django_admin_log\" does not exist\nLINE 1: ...n_flag\", \"django_admin_log\".\"change_message\" FROM \"django_ad...\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ^\nThe above exception was the direct cause of the following exception:\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.6/site-packages/django/db/models/sql/compiler.py\", line 1156, in execute_sql\n\tcursor.execute(sql, params)\n File \"/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py\", line 66, in execute\n\treturn self._execute_with_wrappers(sql, params, many=False, executor=self._execute)\n File \"/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py\", line 75, in _execute_with_wrappers\n\treturn executor(sql, params, many, context)\n File \"/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py\", line 84, in _execute\n\treturn self.cursor.execute(sql, params)\n File \"/usr/local/lib/python3.6/site-packages/django/db/utils.py\", line 90, in __exit__\n\traise dj_exc_value.with_traceback(traceback) from exc_value\n File \"/usr/local/lib/python3.6/site-packages/django/db/backends/utils.py\", line 84, in _execute\n\treturn self.cursor.execute(sql, params)\ndjango.db.utils.ProgrammingError: relation \"django_admin_log\" does not exist\nLINE 1: ...n_flag\", \"django_admin_log\".\"change_message\" FROM \"django_ad...\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ^\nDuring handling of the above exception, another exception occurred:\nTraceback (most recent call last):\n File \"./manage.py\", line 15, in \n\tmain()\n File \"./manage.py\", line 11, in main\n\texecute_from_command_line(sys.argv)\n File \"/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\n\tutility.execute()\n File \"/usr/local/lib/python3.6/site-packages/django/core/management/__init__.py\", line 395, in execute\n\tself.fetch_command(subcommand).run_from_argv(self.argv)\n File \"/usr/local/lib/python3.6/site-packages/django/core/management/commands/test.py\", line 23, in run_from_argv\n\tsuper().run_from_argv(argv)\n File \"/usr/local/lib/python3.6/site-packages/django/core/management/base.py\", line 330, in run_from_argv\n\tself.execute(*args, **cmd_options)\n File \"/usr/local/lib/python3.6/site-packages/django/core/management/base.py\", line 371, in execute\n\toutput = self.handle(*args, **options)\n File \"/usr/local/lib/python3.6/site-packages/django/core/management/commands/test.py\", line 53, in handle\n\tfailures = test_runner.run_tests(test_labels)\n File \"/usr/local/lib/python3.6/site-packages/django/test/runner.py\", line 695, in run_tests\n\told_config = self.setup_databases(aliases=databases)\n File \"/usr/local/lib/python3.6/site-packages/django/test/runner.py\", line 616, in setup_databases\n\tself.parallel, **kwargs\n File \"/usr/local/lib/python3.6/site-packages/django/test/utils.py\", line 174, in setup_databases\n\tserialize=connection.settings_dict['TEST'].get('SERIALIZE', True),\n File \"/usr/local/lib/python3.6/site-packages/django/db/backends/base/creation.py\", line 78, in create_test_db\n\tself.connection._test_serialized_contents = self.serialize_db_to_string()\n File \"/usr/local/lib/python3.6/site-packages/django/db/backends/base/creation.py\", line 121, in 
serialize_db_to_string\n\tserializers.serialize(\"json\", get_objects(), indent=None, stream=out)\n File \"/usr/local/lib/python3.6/site-packages/django/core/serializers/__init__.py\", line 128, in serialize\n\ts.serialize(queryset, **options)\n File \"/usr/local/lib/python3.6/site-packages/django/core/serializers/base.py\", line 90, in serialize\n\tfor count, obj in enumerate(queryset, start=1):\n File \"/usr/local/lib/python3.6/site-packages/django/db/backends/base/creation.py\", line 118, in get_objects\n\tyield from queryset.iterator()\n File \"/usr/local/lib/python3.6/site-packages/django/db/models/query.py\", line 360, in _iterator\n\tyield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size)\n File \"/usr/local/lib/python3.6/site-packages/django/db/models/query.py\", line 53, in __iter__\n\tresults = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)\n File \"/usr/local/lib/python3.6/site-packages/django/db/models/sql/compiler.py\", line 1159, in execute_sql\n\tcursor.close()\npsycopg2.errors.InvalidCursorName: cursor \"_django_curs_139860821038912_sync_1\" does not exist\n", + "golden_patch": "diff --git a/django/db/backends/base/creation.py b/django/db/backends/base/creation.py\n--- a/django/db/backends/base/creation.py\n+++ b/django/db/backends/base/creation.py\n@@ -58,7 +58,14 @@ def create_test_db(self, verbosity=1, autoclobber=False, serialize=True, keepdb=\n settings.DATABASES[self.connection.alias][\"NAME\"] = test_database_name\n self.connection.settings_dict[\"NAME\"] = test_database_name\n \n- if self.connection.settings_dict['TEST']['MIGRATE']:\n+ try:\n+ if self.connection.settings_dict['TEST']['MIGRATE'] is False:\n+ # Disable migrations for all apps.\n+ old_migration_modules = settings.MIGRATION_MODULES\n+ settings.MIGRATION_MODULES = {\n+ app.label: None\n+ for app in apps.get_app_configs()\n+ }\n # We report migrate messages at one level lower than that\n # requested. This ensures we don't get flooded with messages during\n # testing (unless you really ask to be flooded).\n@@ -69,6 +76,9 @@ def create_test_db(self, verbosity=1, autoclobber=False, serialize=True, keepdb=\n database=self.connection.alias,\n run_syncdb=True,\n )\n+ finally:\n+ if self.connection.settings_dict['TEST']['MIGRATE'] is False:\n+ settings.MIGRATION_MODULES = old_migration_modules\n \n # We then serialize the current state of the database into a string\n # and store it on the connection. 
This slightly horrific process is so people\n", + "test_patch": "diff --git a/tests/backends/base/app_unmigrated/__init__.py b/tests/backends/base/app_unmigrated/__init__.py\nnew file mode 100644\ndiff --git a/tests/backends/base/app_unmigrated/migrations/0001_initial.py b/tests/backends/base/app_unmigrated/migrations/0001_initial.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/backends/base/app_unmigrated/migrations/0001_initial.py\n@@ -0,0 +1,17 @@\n+from django.db import migrations, models\n+\n+\n+class Migration(migrations.Migration):\n+ initial = True\n+\n+ dependencies = []\n+\n+ operations = [\n+ migrations.CreateModel(\n+ name='Foo',\n+ fields=[\n+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n+ ('name', models.CharField(max_length=255)),\n+ ],\n+ ),\n+ ]\ndiff --git a/tests/backends/base/app_unmigrated/migrations/__init__.py b/tests/backends/base/app_unmigrated/migrations/__init__.py\nnew file mode 100644\ndiff --git a/tests/backends/base/app_unmigrated/models.py b/tests/backends/base/app_unmigrated/models.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/backends/base/app_unmigrated/models.py\n@@ -0,0 +1,8 @@\n+from django.db import models\n+\n+\n+class Foo(models.Model):\n+ name = models.CharField(max_length=255)\n+\n+ class Meta:\n+ app_label = 'app_unmigrated'\ndiff --git a/tests/backends/base/test_creation.py b/tests/backends/base/test_creation.py\n--- a/tests/backends/base/test_creation.py\n+++ b/tests/backends/base/test_creation.py\n@@ -6,6 +6,7 @@\n TEST_DATABASE_PREFIX, BaseDatabaseCreation,\n )\n from django.test import SimpleTestCase, TransactionTestCase\n+from django.test.utils import override_settings\n \n from ..models import (\n CircularA, CircularB, Object, ObjectReference, ObjectSelfReference,\n@@ -49,31 +50,57 @@ def test_custom_test_name_with_test_prefix(self):\n self.assertEqual(signature[3], test_name)\n \n \n+@override_settings(INSTALLED_APPS=['backends.base.app_unmigrated'])\n @mock.patch.object(connection, 'ensure_connection')\n-@mock.patch('django.core.management.commands.migrate.Command.handle', return_value=None)\n+@mock.patch.object(connection, 'prepare_database')\n+@mock.patch('django.db.migrations.recorder.MigrationRecorder.has_table', return_value=False)\n+@mock.patch('django.db.migrations.executor.MigrationExecutor.migrate')\n+@mock.patch('django.core.management.commands.migrate.Command.sync_apps')\n class TestDbCreationTests(SimpleTestCase):\n- def test_migrate_test_setting_false(self, mocked_migrate, mocked_ensure_connection):\n+ available_apps = ['backends.base.app_unmigrated']\n+\n+ def test_migrate_test_setting_false(self, mocked_sync_apps, mocked_migrate, *mocked_objects):\n test_connection = get_connection_copy()\n test_connection.settings_dict['TEST']['MIGRATE'] = False\n creation = test_connection.creation_class(test_connection)\n+ if connection.vendor == 'oracle':\n+ # Don't close connection on Oracle.\n+ creation.connection.close = mock.Mock()\n old_database_name = test_connection.settings_dict['NAME']\n try:\n with mock.patch.object(creation, '_create_test_db'):\n creation.create_test_db(verbosity=0, autoclobber=True, serialize=False)\n- mocked_migrate.assert_not_called()\n+ # Migrations don't run.\n+ mocked_migrate.assert_called()\n+ args, kwargs = mocked_migrate.call_args\n+ self.assertEqual(args, ([],))\n+ self.assertEqual(kwargs['plan'], [])\n+ # App is synced.\n+ mocked_sync_apps.assert_called()\n+ mocked_args, _ = mocked_sync_apps.call_args\n+ 
self.assertEqual(mocked_args[1], {'app_unmigrated'})\n finally:\n with mock.patch.object(creation, '_destroy_test_db'):\n creation.destroy_test_db(old_database_name, verbosity=0)\n \n- def test_migrate_test_setting_true(self, mocked_migrate, mocked_ensure_connection):\n+ def test_migrate_test_setting_true(self, mocked_sync_apps, mocked_migrate, *mocked_objects):\n test_connection = get_connection_copy()\n test_connection.settings_dict['TEST']['MIGRATE'] = True\n creation = test_connection.creation_class(test_connection)\n+ if connection.vendor == 'oracle':\n+ # Don't close connection on Oracle.\n+ creation.connection.close = mock.Mock()\n old_database_name = test_connection.settings_dict['NAME']\n try:\n with mock.patch.object(creation, '_create_test_db'):\n creation.create_test_db(verbosity=0, autoclobber=True, serialize=False)\n- mocked_migrate.assert_called_once()\n+ # Migrations run.\n+ mocked_migrate.assert_called()\n+ args, kwargs = mocked_migrate.call_args\n+ self.assertEqual(args, ([('app_unmigrated', '0001_initial')],))\n+ self.assertEqual(len(kwargs['plan']), 1)\n+ # App is not synced.\n+ mocked_sync_apps.assert_not_called()\n finally:\n with mock.patch.object(creation, '_destroy_test_db'):\n creation.destroy_test_db(old_database_name, verbosity=0)\n", + "fail_to_pass": "[\"test_migrate_test_setting_false (backends.base.test_creation.TestDbCreationTests)\"]", + "pass_to_pass": "[\"test_custom_test_name (backends.base.test_creation.TestDbSignatureTests)\", \"test_custom_test_name_with_test_prefix (backends.base.test_creation.TestDbSignatureTests)\", \"test_default_name (backends.base.test_creation.TestDbSignatureTests)\", \"test_migrate_test_setting_true (backends.base.test_creation.TestDbCreationTests)\", \"test_circular_reference (backends.base.test_creation.TestDeserializeDbFromString)\", \"test_circular_reference_with_natural_key (backends.base.test_creation.TestDeserializeDbFromString)\", \"test_self_reference (backends.base.test_creation.TestDeserializeDbFromString)\"]", + "expected_spans": { + "django/db/backends/base/creation.py": [ + "BaseDatabaseCreation.create_test_db" + ] + }, + "test_file_spans": { + "tests/backends/base/test_creation.py": [ + "imports", + "TestDbCreationTests", + "TestDbCreationTests.test_migrate_test_setting_false", + "TestDbCreationTests.test_migrate_test_setting_true" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-20154", + "repo": "sympy/sympy", + "base_commit": "bdb49c4abfb35554a3c8ce761696ffff3bb837fe", + "problem_statement": "partitions() reusing the output dictionaries\nThe partitions() iterator in sympy.utilities.iterables reuses the output dictionaries. There is a caveat about it in the docstring. \r\n\r\nI'm wondering if it's really that important for it to do this. It shouldn't be that much of a performance loss to copy the dictionary before yielding it. This behavior is very confusing. It means that something as simple as list(partitions()) will give an apparently wrong result. And it can lead to much more subtle bugs if the partitions are used in a nontrivial way. 
\n", + "golden_patch": "diff --git a/sympy/utilities/iterables.py b/sympy/utilities/iterables.py\n--- a/sympy/utilities/iterables.py\n+++ b/sympy/utilities/iterables.py\n@@ -1738,21 +1738,6 @@ def partitions(n, m=None, k=None, size=False):\n {2: 1, 4: 1}\n {3: 2}\n \n- Note that the _same_ dictionary object is returned each time.\n- This is for speed: generating each partition goes quickly,\n- taking constant time, independent of n.\n-\n- >>> [p for p in partitions(6, k=2)]\n- [{1: 6}, {1: 6}, {1: 6}, {1: 6}]\n-\n- If you want to build a list of the returned dictionaries then\n- make a copy of them:\n-\n- >>> [p.copy() for p in partitions(6, k=2)] # doctest: +SKIP\n- [{2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}]\n- >>> [(M, p.copy()) for M, p in partitions(6, k=2, size=True)] # doctest: +SKIP\n- [(3, {2: 3}), (4, {1: 2, 2: 2}), (5, {1: 4, 2: 1}), (6, {1: 6})]\n-\n References\n ==========\n \n@@ -1802,9 +1787,9 @@ def partitions(n, m=None, k=None, size=False):\n keys.append(r)\n room = m - q - bool(r)\n if size:\n- yield sum(ms.values()), ms\n+ yield sum(ms.values()), ms.copy()\n else:\n- yield ms\n+ yield ms.copy()\n \n while keys != [1]:\n # Reuse any 1's.\n@@ -1842,9 +1827,9 @@ def partitions(n, m=None, k=None, size=False):\n break\n room -= need\n if size:\n- yield sum(ms.values()), ms\n+ yield sum(ms.values()), ms.copy()\n else:\n- yield ms\n+ yield ms.copy()\n \n \n def ordered_partitions(n, m=None, sort=True):\n", + "test_patch": "diff --git a/sympy/utilities/tests/test_iterables.py b/sympy/utilities/tests/test_iterables.py\n--- a/sympy/utilities/tests/test_iterables.py\n+++ b/sympy/utilities/tests/test_iterables.py\n@@ -481,24 +481,24 @@ def test_partitions():\n assert list(partitions(6, None, 2, size=i)) != ans[i]\n assert list(partitions(6, 2, 0, size=i)) == ans[i]\n \n- assert [p.copy() for p in partitions(6, k=2)] == [\n+ assert [p for p in partitions(6, k=2)] == [\n {2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}]\n \n- assert [p.copy() for p in partitions(6, k=3)] == [\n+ assert [p for p in partitions(6, k=3)] == [\n {3: 2}, {1: 1, 2: 1, 3: 1}, {1: 3, 3: 1}, {2: 3}, {1: 2, 2: 2},\n {1: 4, 2: 1}, {1: 6}]\n \n- assert [p.copy() for p in partitions(8, k=4, m=3)] == [\n+ assert [p for p in partitions(8, k=4, m=3)] == [\n {4: 2}, {1: 1, 3: 1, 4: 1}, {2: 2, 4: 1}, {2: 1, 3: 2}] == [\n- i.copy() for i in partitions(8, k=4, m=3) if all(k <= 4 for k in i)\n+ i for i in partitions(8, k=4, m=3) if all(k <= 4 for k in i)\n and sum(i.values()) <=3]\n \n- assert [p.copy() for p in partitions(S(3), m=2)] == [\n+ assert [p for p in partitions(S(3), m=2)] == [\n {3: 1}, {1: 1, 2: 1}]\n \n- assert [i.copy() for i in partitions(4, k=3)] == [\n+ assert [i for i in partitions(4, k=3)] == [\n {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}] == [\n- i.copy() for i in partitions(4) if all(k <= 3 for k in i)]\n+ i for i in partitions(4) if all(k <= 3 for k in i)]\n \n \n # Consistency check on output of _partitions and RGS_unrank.\n@@ -697,7 +697,7 @@ def test_reshape():\n \n \n def test_uniq():\n- assert list(uniq(p.copy() for p in partitions(4))) == \\\n+ assert list(uniq(p for p in partitions(4))) == \\\n [{4: 1}, {1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}]\n assert list(uniq(x % 2 for x in range(5))) == [0, 1]\n assert list(uniq('a')) == ['a']\n", + "fail_to_pass": "[\"test_partitions\", \"test_uniq\"]", + "pass_to_pass": "[\"test_is_palindromic\", \"test_postorder_traversal\", \"test_flatten\", \"test_iproduct\", \"test_group\", \"test_subsets\", \"test_variations\", \"test_cartes\", 
\"test_filter_symbols\", \"test_numbered_symbols\", \"test_sift\", \"test_take\", \"test_dict_merge\", \"test_prefixes\", \"test_postfixes\", \"test_topological_sort\", \"test_strongly_connected_components\", \"test_connected_components\", \"test_rotate\", \"test_multiset_partitions\", \"test_multiset_combinations\", \"test_multiset_permutations\", \"test_binary_partitions\", \"test_bell_perm\", \"test_involutions\", \"test_derangements\", \"test_necklaces\", \"test_bracelets\", \"test_generate_oriented_forest\", \"test_unflatten\", \"test_common_prefix_suffix\", \"test_minlex\", \"test_ordered\", \"test_runs\", \"test_reshape\", \"test_kbins\", \"test_has_dups\", \"test__partition\", \"test_ordered_partitions\", \"test_rotations\"]", + "expected_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "test_file_spans": { + "sympy/utilities/tests/test_iterables.py": [ + "test_partitions", + "test_uniq" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "imports", + "is_palindromic", + "flatten", + "reshape", + "group", + "_iproduct2", + "iproduct", + "interactive_traversal", + "ibin", + "variations", + "subsets", + "numbered_symbols", + "capture", + "take", + "dict_merge", + "common_suffix", + "prefixes", + "postfixes", + "topological_sort", + "strongly_connected_components", + "connected_components", + "least_rotation", + "multiset_combinations", + "multiset_permutations", + "_partition", + "_set_partitions", + "multiset_partitions", + "partitions", + "ordered_partitions", + "binary_partitions", + "has_variety", + "uniq", + "generate_bell", + "generate_derangements", + "necklaces", + "generate_oriented_forest", + "minlex", + "kbins", + "permute_signs", + "signed_permutations", + "roundrobin" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "imports", + "is_palindromic", + "flatten", + "reshape", + "group", + "_iproduct2", + "iproduct", + "interactive_traversal", + "ibin", + "variations", + "subsets", + "numbered_symbols", + "capture", + "take", + "dict_merge", + "common_suffix", + "prefixes", + "postfixes", + "topological_sort", + "strongly_connected_components", + "connected_components", + "least_rotation", + "multiset_combinations", + "multiset_permutations", + "_partition", + "_set_partitions", + "multiset_partitions", + 
"partitions", + "ordered_partitions", + "binary_partitions", + "has_variety", + "uniq", + "generate_bell", + "generate_derangements", + "necklaces", + "generate_oriented_forest", + "minlex", + "kbins", + "permute_signs", + "signed_permutations", + "roundrobin" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ], + "sympy/utilities/tests/test_iterables.py": [ + "test_partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions", + "ordered_partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions", + "ordered_partitions" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ], + "sympy/utilities/tests/test_iterables.py": [ + "test_partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + }, + "alternative_spans": { + "sympy/utilities/iterables.py": [ + "partitions" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sphinx-doc__sphinx-8273", + "repo": "sphinx-doc/sphinx", + "base_commit": "88b81a06eb635a1596617f8971fa97a84c069e93", + "problem_statement": "Generate man page section directories\n**Current man page generation does 
not conform to `MANPATH` search functionality**\r\nCurrently, all generated man pages are placed in to a single-level directory: `/man`. Unfortunately, this cannot be used in combination with the unix `MANPATH` environment variable. The `man` program explicitly looks for man pages in section directories (such as `man/man1`, etc.). \r\n\r\n**Describe the solution you'd like**\r\nIt would be great if sphinx would automatically create the section directories (e.g., `man/man1/`, `man/man3/`, etc.) and place each generated man page within appropriate section.\r\n\r\n**Describe alternatives you've considered**\r\nThis problem can be over come within our project\u2019s build system, ensuring the built man pages are installed in a correct location, but it would be nice if the build directory had the proper layout.\r\n\r\nI\u2019m happy to take a crack at implementing a fix, though this change in behavior may break some people who expect everything to appear in a `man/` directory. \r\n\n", + "golden_patch": "diff --git a/sphinx/builders/manpage.py b/sphinx/builders/manpage.py\n--- a/sphinx/builders/manpage.py\n+++ b/sphinx/builders/manpage.py\n@@ -24,7 +24,7 @@\n from sphinx.util import progress_message\n from sphinx.util.console import darkgreen # type: ignore\n from sphinx.util.nodes import inline_all_toctrees\n-from sphinx.util.osutil import make_filename_from_project\n+from sphinx.util.osutil import ensuredir, make_filename_from_project\n from sphinx.writers.manpage import ManualPageWriter, ManualPageTranslator\n \n \n@@ -80,7 +80,12 @@ def write(self, *ignored: Any) -> None:\n docsettings.authors = authors\n docsettings.section = section\n \n- targetname = '%s.%s' % (name, section)\n+ if self.config.man_make_section_directory:\n+ ensuredir(path.join(self.outdir, str(section)))\n+ targetname = '%s/%s.%s' % (section, name, section)\n+ else:\n+ targetname = '%s.%s' % (name, section)\n+\n logger.info(darkgreen(targetname) + ' { ', nonl=True)\n destination = FileOutput(\n destination_path=path.join(self.outdir, targetname),\n@@ -115,6 +120,7 @@ def setup(app: Sphinx) -> Dict[str, Any]:\n \n app.add_config_value('man_pages', default_man_pages, None)\n app.add_config_value('man_show_urls', False, None)\n+ app.add_config_value('man_make_section_directory', False, None)\n \n return {\n 'version': 'builtin',\n", + "test_patch": "diff --git a/tests/test_build_manpage.py b/tests/test_build_manpage.py\n--- a/tests/test_build_manpage.py\n+++ b/tests/test_build_manpage.py\n@@ -30,6 +30,13 @@ def test_all(app, status, warning):\n assert 'Footnotes' not in content\n \n \n+@pytest.mark.sphinx('man', testroot='basic',\n+ confoverrides={'man_make_section_directory': True})\n+def test_man_make_section_directory(app, status, warning):\n+ app.build()\n+ assert (app.outdir / '1' / 'python.1').exists()\n+\n+\n @pytest.mark.sphinx('man', testroot='directive-code')\n def test_captioned_code_block(app, status, warning):\n app.builder.build_all()\n", + "fail_to_pass": "[\"tests/test_build_manpage.py::test_man_make_section_directory\"]", + "pass_to_pass": "[\"tests/test_build_manpage.py::test_all\", \"tests/test_build_manpage.py::test_default_man_pages\", \"tests/test_build_manpage.py::test_rubric\"]", + "expected_spans": { + "sphinx/builders/manpage.py": [ + "imports", + "ManualPageBuilder.write", + "setup" + ] + }, + "test_file_spans": { + "tests/test_build_manpage.py": [ + "test_captioned_code_block" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sphinx-doc__sphinx-8282", + "repo": 
"sphinx-doc/sphinx", + "base_commit": "2c2335bbb8af99fa132e1573bbf45dc91584d5a2", + "problem_statement": "autodoc_typehints does not effect to overloaded callables\n**Describe the bug**\r\nautodoc_typehints does not effect to overloaded callables.\r\n\r\n**To Reproduce**\r\n\r\n```\r\n# in conf.py\r\nautodoc_typehints = 'none'\r\n```\r\n```\r\n# in index.rst\r\n.. automodule:: example\r\n :members:\r\n :undoc-members:\r\n```\r\n```\r\n# in example.py\r\nfrom typing import overload\r\n\r\n\r\n@overload\r\ndef foo(x: int) -> int:\r\n ...\r\n\r\n\r\n@overload\r\ndef foo(x: float) -> float:\r\n ...\r\n\r\n\r\ndef foo(x):\r\n return x\r\n```\r\n\r\n**Expected behavior**\r\nAll typehints for overloaded callables are obeyed `autodoc_typehints` setting.\r\n\r\n**Your project**\r\nNo\r\n\r\n**Screenshots**\r\nNo\r\n\r\n**Environment info**\r\n- OS: Mac\r\n- Python version: 3.8.2\r\n- Sphinx version: 3.1.0dev\r\n- Sphinx extensions: sphinx.ext.autodoc\r\n- Extra tools: No\r\n\r\n**Additional context**\r\nNo\n", + "golden_patch": "diff --git a/sphinx/ext/autodoc/__init__.py b/sphinx/ext/autodoc/__init__.py\n--- a/sphinx/ext/autodoc/__init__.py\n+++ b/sphinx/ext/autodoc/__init__.py\n@@ -1240,7 +1240,9 @@ def add_directive_header(self, sig: str) -> None:\n \n def format_signature(self, **kwargs: Any) -> str:\n sigs = []\n- if self.analyzer and '.'.join(self.objpath) in self.analyzer.overloads:\n+ if (self.analyzer and\n+ '.'.join(self.objpath) in self.analyzer.overloads and\n+ self.env.config.autodoc_typehints == 'signature'):\n # Use signatures for overloaded functions instead of the implementation function.\n overloaded = True\n else:\n@@ -1474,7 +1476,7 @@ def format_signature(self, **kwargs: Any) -> str:\n sigs = []\n \n overloads = self.get_overloaded_signatures()\n- if overloads:\n+ if overloads and self.env.config.autodoc_typehints == 'signature':\n # Use signatures for overloaded methods instead of the implementation method.\n method = safe_getattr(self._signature_class, self._signature_method_name, None)\n __globals__ = safe_getattr(method, '__globals__', {})\n@@ -1882,7 +1884,9 @@ def document_members(self, all_members: bool = False) -> None:\n \n def format_signature(self, **kwargs: Any) -> str:\n sigs = []\n- if self.analyzer and '.'.join(self.objpath) in self.analyzer.overloads:\n+ if (self.analyzer and\n+ '.'.join(self.objpath) in self.analyzer.overloads and\n+ self.env.config.autodoc_typehints == 'signature'):\n # Use signatures for overloaded methods instead of the implementation method.\n overloaded = True\n else:\n", + "test_patch": "diff --git a/tests/test_ext_autodoc_configs.py b/tests/test_ext_autodoc_configs.py\n--- a/tests/test_ext_autodoc_configs.py\n+++ b/tests/test_ext_autodoc_configs.py\n@@ -610,6 +610,54 @@ def test_autodoc_typehints_none(app):\n ]\n \n \n+@pytest.mark.sphinx('html', testroot='ext-autodoc',\n+ confoverrides={'autodoc_typehints': 'none'})\n+def test_autodoc_typehints_none_for_overload(app):\n+ options = {\"members\": None}\n+ actual = do_autodoc(app, 'module', 'target.overload', options)\n+ assert list(actual) == [\n+ '',\n+ '.. py:module:: target.overload',\n+ '',\n+ '',\n+ '.. py:class:: Bar(x, y)',\n+ ' :module: target.overload',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ '.. py:class:: Baz(x, y)',\n+ ' :module: target.overload',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ '.. py:class:: Foo(x, y)',\n+ ' :module: target.overload',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ '.. 
py:class:: Math()',\n+ ' :module: target.overload',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ ' .. py:method:: Math.sum(x, y)',\n+ ' :module: target.overload',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ '.. py:function:: sum(x, y)',\n+ ' :module: target.overload',\n+ '',\n+ ' docstring',\n+ '',\n+ ]\n+\n+\n @pytest.mark.sphinx('text', testroot='ext-autodoc',\n confoverrides={'autodoc_typehints': \"description\"})\n def test_autodoc_typehints_description(app):\n", + "fail_to_pass": "[\"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_none_for_overload\"]", + "pass_to_pass": "[\"tests/test_ext_autodoc_configs.py::test_autoclass_content_class\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_init\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_both\", \"tests/test_ext_autodoc_configs.py::test_autodoc_inherit_docstrings\", \"tests/test_ext_autodoc_configs.py::test_autodoc_docstring_signature\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_and_docstring_signature_class\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_and_docstring_signature_init\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_and_docstring_signature_both\", \"tests/test_ext_autodoc_configs.py::test_mocked_module_imports\", \"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_signature\", \"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_none\", \"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_description\", \"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_description_for_invalid_node\", \"tests/test_ext_autodoc_configs.py::test_autodoc_type_aliases\", \"tests/test_ext_autodoc_configs.py::test_autodoc_default_options\", \"tests/test_ext_autodoc_configs.py::test_autodoc_default_options_with_values\"]", + "expected_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "FunctionDocumenter.format_signature", + "ClassDocumenter.format_signature", + "MethodDocumenter.format_signature" + ] + }, + "test_file_spans": { + "tests/test_ext_autodoc_configs.py": [ + "test_autodoc_typehints_description" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-20212", + "repo": "sympy/sympy", + "base_commit": "a106f4782a9dbe7f8fd16030f15401d977e03ae9", + "problem_statement": "0**-oo produces 0, the documentation says it should produce zoo\nUsing SymPy 1.5.1, evaluate `0**-oo` produces `0`.\r\n\r\nThe documentation for the Pow class states that it should return `ComplexInfinity`, aka `zoo`\r\n\r\n| expr | value | reason |\r\n| :-- | :-- | :--|\r\n| `0**-oo` | `zoo` | This is not strictly true, as 0**oo may be oscillating between positive and negative values or rotating in the complex plane. 
It is convenient, however, when the base is positive.|\r\n\n", + "golden_patch": "diff --git a/sympy/core/power.py b/sympy/core/power.py\n--- a/sympy/core/power.py\n+++ b/sympy/core/power.py\n@@ -291,6 +291,8 @@ def __new__(cls, b, e, evaluate=None):\n ).warn()\n \n if evaluate:\n+ if b is S.Zero and e is S.NegativeInfinity:\n+ return S.ComplexInfinity\n if e is S.ComplexInfinity:\n return S.NaN\n if e is S.Zero:\n", + "test_patch": "diff --git a/sympy/core/tests/test_power.py b/sympy/core/tests/test_power.py\n--- a/sympy/core/tests/test_power.py\n+++ b/sympy/core/tests/test_power.py\n@@ -266,6 +266,9 @@ def test_zero():\n assert 0**(2*x*y) == 0**(x*y)\n assert 0**(-2*x*y) == S.ComplexInfinity**(x*y)\n \n+ #Test issue 19572\n+ assert 0 ** -oo is zoo\n+ assert power(0, -oo) is zoo\n \n def test_pow_as_base_exp():\n x = Symbol('x')\n", + "fail_to_pass": "[\"test_zero\"]", + "pass_to_pass": "[\"test_rational\", \"test_large_rational\", \"test_negative_real\", \"test_expand\", \"test_issue_3449\", \"test_issue_3866\", \"test_negative_one\", \"test_issue_4362\", \"test_Pow_Expr_args\", \"test_Pow_signs\", \"test_power_with_noncommutative_mul_as_base\", \"test_power_rewrite_exp\", \"test_pow_as_base_exp\", \"test_nseries\", \"test_issue_6100_12942_4473\", \"test_issue_6208\", \"test_issue_6990\", \"test_issue_6068\", \"test_issue_6782\", \"test_issue_6653\", \"test_issue_6429\", \"test_issue_7638\", \"test_issue_8582\", \"test_issue_8650\", \"test_issue_13914\", \"test_better_sqrt\", \"test_issue_2993\", \"test_issue_17450\", \"test_issue_18190\", \"test_issue_14815\", \"test_issue_18509\", \"test_issue_18762\"]", + "expected_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_power.py": [ + "test_zero" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sympy/core/numbers.py": [ + "Zero._eval_power" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sympy/core/numbers.py": [ + "Zero._eval_power" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sympy/core/power.py": [ + "imports", + "isqrt", + "integer_nthroot", + "_integer_nthroot_python", + 
"integer_log", + "Pow", + "Pow.__new__", + "Pow._eval_power", + "Pow._eval_Mod", + "Pow._eval_is_extended_negative", + "Pow._eval_is_zero", + "Pow._eval_is_integer", + "Pow._eval_is_extended_real", + "Pow._eval_is_complex", + "Pow._eval_is_imaginary", + "Pow._eval_is_prime", + "Pow._eval_is_composite", + "Pow._eval_subs", + "Pow.as_base_exp", + "Pow._eval_adjoint", + "Pow._eval_conjugate", + "Pow._eval_transpose", + "Pow._eval_expand_power_base", + "Pow._eval_expand_multinomial", + "Pow.as_real_imag", + "Pow._eval_derivative", + "Pow._eval_evalf", + "Pow._eval_is_polynomial", + "Pow._eval_is_rational", + "Pow._eval_is_algebraic", + "Pow._eval_is_rational_function", + "Pow._eval_is_meromorphic", + "Pow._eval_is_algebraic_expr", + "Pow._eval_rewrite_as_exp", + "Pow.as_numer_denom", + "Pow.matches", + "Pow._eval_nseries", + "Pow._eval_as_leading_term", + "Pow._taylor_term", + "Pow._sage_", + "Pow.as_content_primitive", + "Pow.is_constant", + "Pow._eval_difference_delta", + "impl" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "imports", + "isqrt", + "integer_nthroot", + "_integer_nthroot_python", + "integer_log", + "Pow", + "Pow.__new__", + "Pow._eval_power", + "Pow._eval_Mod", + "Pow._eval_is_extended_negative", + "Pow._eval_is_zero", + "Pow._eval_is_integer", + "Pow._eval_is_extended_real", + "Pow._eval_is_complex", + "Pow._eval_is_imaginary", + "Pow._eval_is_prime", + "Pow._eval_is_composite", + "Pow._eval_subs", + "Pow.as_base_exp", + "Pow._eval_adjoint", + "Pow._eval_conjugate", + "Pow._eval_transpose", + "Pow._eval_expand_power_base", + "Pow._eval_expand_multinomial", + "Pow.as_real_imag", + "Pow._eval_derivative", + "Pow._eval_evalf", + "Pow._eval_is_polynomial", + "Pow._eval_is_rational", + "Pow._eval_is_algebraic", + "Pow._eval_is_rational_function", + "Pow._eval_is_meromorphic", + "Pow._eval_is_algebraic_expr", + "Pow._eval_rewrite_as_exp", + "Pow.as_numer_denom", + "Pow.matches", + "Pow._eval_nseries", + "Pow._eval_as_leading_term", + "Pow._taylor_term", + "Pow._sage_", + "Pow.as_content_primitive", + "Pow.is_constant", + "Pow._eval_difference_delta", + "impl" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ], + "sympy/core/tests/test_power.py": [ + "test_zero" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/core/numbers.py": [ + "Zero._eval_power" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": 
{ + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/core/numbers.py": [ + "Zero._eval_power" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ], + "sympy/core/tests/test_power.py": [] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + }, + "alternative_spans": { + "sympy/core/power.py": [ + "Pow.__new__" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pydata__xarray-4493", + "repo": "pydata/xarray", + "base_commit": "a5f53e203c52a7605d5db799864046471115d04f", + "problem_statement": "DataSet.update causes chunked dask DataArray to evalute its values eagerly \n**What happened**:\r\nUsed `DataSet.update` to update a chunked dask DataArray, but the DataArray is no longer chunked after the update.\r\n\r\n**What you expected to happen**:\r\nThe chunked DataArray should still be chunked after the update\r\n\r\n**Minimal Complete Verifiable Example**:\r\n\r\n```python\r\nfoo = xr.DataArray(np.random.randn(3, 3), dims=(\"x\", \"y\")).chunk() # foo is chunked\r\nds = xr.Dataset({\"foo\": foo, \"bar\": (\"x\", [1, 2, 3])}) # foo is still chunked here\r\nds # you can verify that foo is chunked\r\n```\r\n```python\r\nupdate_dict = {\"foo\": ((\"x\", \"y\"), ds.foo[1:, :]), \"bar\": (\"x\", ds.bar[1:])}\r\nupdate_dict[\"foo\"][1] # foo is still chunked\r\n```\r\n```python\r\nds.update(update_dict)\r\nds # now foo is no longer chunked\r\n```\r\n\r\n**Environment**:\r\n\r\n
Output of xr.show_versions()\r\n\r\n```\r\ncommit: None\r\npython: 3.8.3 (default, Jul 2 2020, 11:26:31) \r\n[Clang 10.0.0 ]\r\npython-bits: 64\r\nOS: Darwin\r\nOS-release: 19.6.0\r\nmachine: x86_64\r\nprocessor: i386\r\nbyteorder: little\r\nLC_ALL: None\r\nLANG: en_US.UTF-8\r\nLOCALE: en_US.UTF-8\r\nlibhdf5: 1.10.6\r\nlibnetcdf: None\r\n\r\nxarray: 0.16.0\r\npandas: 1.0.5\r\nnumpy: 1.18.5\r\nscipy: 1.5.0\r\nnetCDF4: None\r\npydap: None\r\nh5netcdf: None\r\nh5py: 2.10.0\r\nNio: None\r\nzarr: None\r\ncftime: None\r\nnc_time_axis: None\r\nPseudoNetCDF: None\r\nrasterio: None\r\ncfgrib: None\r\niris: None\r\nbottleneck: None\r\ndask: 2.20.0\r\ndistributed: 2.20.0\r\nmatplotlib: 3.2.2\r\ncartopy: None\r\nseaborn: None\r\nnumbagg: None\r\npint: None\r\nsetuptools: 49.2.0.post20200714\r\npip: 20.1.1\r\nconda: None\r\npytest: 5.4.3\r\nIPython: 7.16.1\r\nsphinx: None\r\n```\r\n\r\n
\nDataset constructor with DataArray triggers computation\nIs it intentional that creating a Dataset with a DataArray and dimension names for a single variable causes computation of that variable? In other words, why does ```xr.Dataset(dict(a=('d0', xr.DataArray(da.random.random(10)))))``` cause the dask array to compute?\r\n\r\nA longer example:\r\n\r\n```python\r\nimport dask.array as da\r\nimport xarray as xr\r\nx = da.random.randint(1, 10, size=(100, 25))\r\nds = xr.Dataset(dict(a=xr.DataArray(x, dims=('x', 'y'))))\r\ntype(ds.a.data)\r\ndask.array.core.Array\r\n\r\n# Recreate the dataset with the same array, but also redefine the dimensions\r\nds2 = xr.Dataset(dict(a=(('x', 'y'), ds.a))\r\ntype(ds2.a.data)\r\nnumpy.ndarray\r\n```\r\n\r\n\n", + "golden_patch": "diff --git a/xarray/core/variable.py b/xarray/core/variable.py\n--- a/xarray/core/variable.py\n+++ b/xarray/core/variable.py\n@@ -120,6 +120,16 @@ def as_variable(obj, name=None) -> \"Union[Variable, IndexVariable]\":\n if isinstance(obj, Variable):\n obj = obj.copy(deep=False)\n elif isinstance(obj, tuple):\n+ if isinstance(obj[1], DataArray):\n+ # TODO: change into TypeError\n+ warnings.warn(\n+ (\n+ \"Using a DataArray object to construct a variable is\"\n+ \" ambiguous, please extract the data using the .data property.\"\n+ \" This will raise a TypeError in 0.19.0.\"\n+ ),\n+ DeprecationWarning,\n+ )\n try:\n obj = Variable(*obj)\n except (TypeError, ValueError) as error:\n", + "test_patch": "diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py\n--- a/xarray/tests/test_dask.py\n+++ b/xarray/tests/test_dask.py\n@@ -1233,7 +1233,7 @@ def test_map_blocks_to_array(map_ds):\n lambda x: x.drop_vars(\"x\"),\n lambda x: x.expand_dims(k=[1, 2, 3]),\n lambda x: x.expand_dims(k=3),\n- lambda x: x.assign_coords(new_coord=(\"y\", x.y * 2)),\n+ lambda x: x.assign_coords(new_coord=(\"y\", x.y.data * 2)),\n lambda x: x.astype(np.int32),\n lambda x: x.x,\n ],\ndiff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py\n--- a/xarray/tests/test_dataset.py\n+++ b/xarray/tests/test_dataset.py\n@@ -4959,13 +4959,13 @@ def test_reduce_keepdims(self):\n # Coordinates involved in the reduction should be removed\n actual = ds.mean(keepdims=True)\n expected = Dataset(\n- {\"a\": ([\"x\", \"y\"], np.mean(ds.a, keepdims=True))}, coords={\"c\": ds.c}\n+ {\"a\": ([\"x\", \"y\"], np.mean(ds.a, keepdims=True).data)}, coords={\"c\": ds.c}\n )\n assert_identical(expected, actual)\n \n actual = ds.mean(\"x\", keepdims=True)\n expected = Dataset(\n- {\"a\": ([\"x\", \"y\"], np.mean(ds.a, axis=0, keepdims=True))},\n+ {\"a\": ([\"x\", \"y\"], np.mean(ds.a, axis=0, keepdims=True).data)},\n coords={\"y\": ds.y, \"c\": ds.c},\n )\n assert_identical(expected, actual)\ndiff --git a/xarray/tests/test_interp.py b/xarray/tests/test_interp.py\n--- a/xarray/tests/test_interp.py\n+++ b/xarray/tests/test_interp.py\n@@ -190,7 +190,7 @@ def func(obj, dim, new_x):\n \"w\": xdest[\"w\"],\n \"z2\": xdest[\"z2\"],\n \"y\": da[\"y\"],\n- \"x\": ((\"z\", \"w\"), xdest),\n+ \"x\": ((\"z\", \"w\"), xdest.data),\n \"x2\": ((\"z\", \"w\"), func(da[\"x2\"], \"x\", xdest)),\n },\n )\ndiff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py\n--- a/xarray/tests/test_variable.py\n+++ b/xarray/tests/test_variable.py\n@@ -8,7 +8,7 @@\n import pytest\n import pytz\n \n-from xarray import Coordinate, Dataset, IndexVariable, Variable, set_options\n+from xarray import Coordinate, DataArray, Dataset, IndexVariable, Variable, set_options\n from 
xarray.core import dtypes, duck_array_ops, indexing\n from xarray.core.common import full_like, ones_like, zeros_like\n from xarray.core.indexing import (\n@@ -1081,6 +1081,9 @@ def test_as_variable(self):\n td = np.array([timedelta(days=x) for x in range(10)])\n assert as_variable(td, \"time\").dtype.kind == \"m\"\n \n+ with pytest.warns(DeprecationWarning):\n+ as_variable((\"x\", DataArray([])))\n+\n def test_repr(self):\n v = Variable([\"time\", \"x\"], [[1, 2, 3], [4, 5, 6]], {\"foo\": \"bar\"})\n expected = dedent(\n", + "fail_to_pass": "[\"xarray/tests/test_variable.py::TestVariable::test_as_variable\"]", + "pass_to_pass": "[\"xarray/tests/test_dask.py::test_raise_if_dask_computes\", \"xarray/tests/test_dask.py::TestVariable::test_basics\", \"xarray/tests/test_dask.py::TestVariable::test_copy\", \"xarray/tests/test_dask.py::TestVariable::test_chunk\", \"xarray/tests/test_dask.py::TestVariable::test_indexing\", \"xarray/tests/test_dask.py::TestVariable::test_squeeze\", \"xarray/tests/test_dask.py::TestVariable::test_equals\", \"xarray/tests/test_dask.py::TestVariable::test_transpose\", \"xarray/tests/test_dask.py::TestVariable::test_shift\", \"xarray/tests/test_dask.py::TestVariable::test_roll\", \"xarray/tests/test_dask.py::TestVariable::test_unary_op\", \"xarray/tests/test_dask.py::TestVariable::test_binary_op\", \"xarray/tests/test_dask.py::TestVariable::test_repr\", \"xarray/tests/test_dask.py::TestVariable::test_pickle\", \"xarray/tests/test_dask.py::TestVariable::test_reduce\", \"xarray/tests/test_dask.py::TestVariable::test_missing_values\", \"xarray/tests/test_dask.py::TestVariable::test_concat\", \"xarray/tests/test_dask.py::TestVariable::test_missing_methods\", \"xarray/tests/test_dask.py::TestVariable::test_univariate_ufunc\", \"xarray/tests/test_dask.py::TestVariable::test_bivariate_ufunc\", \"xarray/tests/test_dask.py::TestVariable::test_compute\", \"xarray/tests/test_dask.py::TestVariable::test_persist\", \"xarray/tests/test_dask.py::TestVariable::test_tokenize_duck_dask_array\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_rechunk\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_new_chunk\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_lazy_dataset\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_lazy_array\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_compute\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_persist\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_concat_loads_variables\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_groupby\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_rolling\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_groupby_first\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_reindex\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_to_dataset_roundtrip\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_merge\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_ufuncs\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_where_dispatching\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_simultaneous_compute\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_stack\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_dot\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_dataarray_repr\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_dataset_repr\", 
\"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_dataarray_pickle\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_dataset_pickle\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_dataarray_getattr\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_dataset_getattr\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_values\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_from_dask_variable\", \"xarray/tests/test_dask.py::TestDataArrayAndDataset::test_tokenize_duck_dask_array\", \"xarray/tests/test_dask.py::TestToDaskDataFrame::test_to_dask_dataframe\", \"xarray/tests/test_dask.py::TestToDaskDataFrame::test_to_dask_dataframe_2D\", \"xarray/tests/test_dask.py::TestToDaskDataFrame::test_to_dask_dataframe_coordinates\", \"xarray/tests/test_dask.py::TestToDaskDataFrame::test_to_dask_dataframe_not_daskarray\", \"xarray/tests/test_dask.py::TestToDaskDataFrame::test_to_dask_dataframe_no_coordinate\", \"xarray/tests/test_dask.py::TestToDaskDataFrame::test_to_dask_dataframe_dim_order\", \"xarray/tests/test_dask.py::test_dask_kwargs_variable[load]\", \"xarray/tests/test_dask.py::test_dask_kwargs_variable[compute]\", \"xarray/tests/test_dask.py::test_dask_kwargs_dataarray[load]\", \"xarray/tests/test_dask.py::test_dask_kwargs_dataarray[compute]\", \"xarray/tests/test_dask.py::test_dask_kwargs_dataarray[persist]\", \"xarray/tests/test_dask.py::test_dask_kwargs_dataset[load]\", \"xarray/tests/test_dask.py::test_dask_kwargs_dataset[compute]\", \"xarray/tests/test_dask.py::test_dask_kwargs_dataset[persist]\", \"xarray/tests/test_dask.py::test_persist_Dataset[0]\", \"xarray/tests/test_dask.py::test_persist_DataArray[0]\", \"xarray/tests/test_dask.py::test_persist_DataArray[1]\", \"xarray/tests/test_dask.py::test_dataarray_with_dask_coords\", \"xarray/tests/test_dask.py::test_basic_compute\", \"xarray/tests/test_dask.py::test_dask_layers_and_dependencies\", \"xarray/tests/test_dask.py::test_unify_chunks\", \"xarray/tests/test_dask.py::test_unify_chunks_shallow_copy[0-obj0]\", \"xarray/tests/test_dask.py::test_unify_chunks_shallow_copy[0-obj1]\", \"xarray/tests/test_dask.py::test_unify_chunks_shallow_copy[1-obj0]\", \"xarray/tests/test_dask.py::test_unify_chunks_shallow_copy[1-obj1]\", \"xarray/tests/test_dask.py::test_auto_chunk_da[obj0]\", \"xarray/tests/test_dask.py::test_make_meta\", \"xarray/tests/test_dask.py::test_identical_coords_no_computes\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[0-obj0]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[0-obj1]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[0-obj2]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[0-obj3]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[1-obj0]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[1-obj1]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[1-obj2]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[1-obj3]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[2-obj0]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[2-obj1]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[2-obj2]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[2-obj3]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[3-obj0]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[3-obj1]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[3-obj2]\", 
\"xarray/tests/test_dask.py::test_token_changes_on_transform[3-obj3]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[4-obj0]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[4-obj1]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[4-obj2]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[4-obj3]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[5-obj0]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[5-obj1]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[5-obj2]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[5-obj3]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[6-obj0]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[6-obj1]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[6-obj2]\", \"xarray/tests/test_dask.py::test_token_changes_on_transform[6-obj3]\", \"xarray/tests/test_dask.py::test_token_changes_when_data_changes[obj0]\", \"xarray/tests/test_dask.py::test_token_changes_when_data_changes[obj1]\", \"xarray/tests/test_dask.py::test_token_changes_when_data_changes[obj2]\", \"xarray/tests/test_dask.py::test_token_changes_when_data_changes[obj3]\", \"xarray/tests/test_dask.py::test_token_changes_when_buffer_changes[obj0]\", \"xarray/tests/test_dask.py::test_token_changes_when_buffer_changes[obj1]\", \"xarray/tests/test_dask.py::test_token_identical[obj0-0]\", \"xarray/tests/test_dask.py::test_token_identical[obj0-1]\", \"xarray/tests/test_dask.py::test_token_identical[obj0-2]\", \"xarray/tests/test_dask.py::test_token_identical[obj1-0]\", \"xarray/tests/test_dask.py::test_token_identical[obj1-1]\", \"xarray/tests/test_dask.py::test_token_identical[obj1-2]\", \"xarray/tests/test_dask.py::test_token_identical[obj2-0]\", \"xarray/tests/test_dask.py::test_token_identical[obj2-1]\", \"xarray/tests/test_dask.py::test_token_identical[obj2-2]\", \"xarray/tests/test_dask.py::test_recursive_token\", \"xarray/tests/test_dask.py::test_normalize_token_with_backend\", \"xarray/tests/test_dask.py::test_lazy_array_equiv_variables[broadcast_equals]\", \"xarray/tests/test_dask.py::test_lazy_array_equiv_variables[equals]\", \"xarray/tests/test_dask.py::test_lazy_array_equiv_variables[identical]\", \"xarray/tests/test_dask.py::test_lazy_array_equiv_variables[no_conflicts]\", \"xarray/tests/test_dask.py::test_lazy_array_equiv_merge[broadcast_equals]\", \"xarray/tests/test_dask.py::test_lazy_array_equiv_merge[equals]\", \"xarray/tests/test_dask.py::test_lazy_array_equiv_merge[identical]\", \"xarray/tests/test_dask.py::test_lazy_array_equiv_merge[no_conflicts]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[0-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[0-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[1-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[1-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[2-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[2-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[3-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[3-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[4-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[4-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[5-obj0]\", 
\"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[5-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[6-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[6-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[7-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[7-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[8-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[8-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[9-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[9-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[10-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[10-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[11-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[11-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[12-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[12-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[13-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[13-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[14-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[14-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[15-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[15-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[16-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[16-obj1]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[17-obj0]\", \"xarray/tests/test_dask.py::test_transforms_pass_lazy_array_equiv[17-obj1]\", \"xarray/tests/test_dask.py::test_more_transforms_pass_lazy_array_equiv\", \"xarray/tests/test_dask.py::test_optimize\", \"xarray/tests/test_dataset.py::TestDataset::test_repr\", \"xarray/tests/test_dataset.py::TestDataset::test_repr_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_repr_period_index\", \"xarray/tests/test_dataset.py::TestDataset::test_unicode_data\", \"xarray/tests/test_dataset.py::TestDataset::test_repr_nep18\", \"xarray/tests/test_dataset.py::TestDataset::test_info\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_invalid_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_1d\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_0d\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_deprecated\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_auto_align\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_pandas_sequence\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_pandas_single\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_compat\", \"xarray/tests/test_dataset.py::TestDataset::test_constructor_with_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_properties\", \"xarray/tests/test_dataset.py::TestDataset::test_asarray\", \"xarray/tests/test_dataset.py::TestDataset::test_get_index\", \"xarray/tests/test_dataset.py::TestDataset::test_attr_access\", \"xarray/tests/test_dataset.py::TestDataset::test_variable\", 
\"xarray/tests/test_dataset.py::TestDataset::test_modify_inplace\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_properties\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_modify\", \"xarray/tests/test_dataset.py::TestDataset::test_update_index\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_setitem_with_new_dimension\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_setitem_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_set\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_to_dataset\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_merge\", \"xarray/tests/test_dataset.py::TestDataset::test_coords_merge_mismatched_shape\", \"xarray/tests/test_dataset.py::TestDataset::test_data_vars_properties\", \"xarray/tests/test_dataset.py::TestDataset::test_equals_and_identical\", \"xarray/tests/test_dataset.py::TestDataset::test_equals_failures\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_equals\", \"xarray/tests/test_dataset.py::TestDataset::test_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_chunk\", \"xarray/tests/test_dataset.py::TestDataset::test_dask_is_lazy\", \"xarray/tests/test_dataset.py::TestDataset::test_isel\", \"xarray/tests/test_dataset.py::TestDataset::test_isel_fancy\", \"xarray/tests/test_dataset.py::TestDataset::test_isel_dataarray\", \"xarray/tests/test_dataset.py::TestDataset::test_sel\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_dataarray\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_dataarray_mindex\", \"xarray/tests/test_dataset.py::TestDataset::test_categorical_index\", \"xarray/tests/test_dataset.py::TestDataset::test_categorical_reindex\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_drop\", \"xarray/tests/test_dataset.py::TestDataset::test_isel_drop\", \"xarray/tests/test_dataset.py::TestDataset::test_head\", \"xarray/tests/test_dataset.py::TestDataset::test_tail\", \"xarray/tests/test_dataset.py::TestDataset::test_thin\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_fancy\", \"xarray/tests/test_dataset.py::TestDataset::test_sel_method\", \"xarray/tests/test_dataset.py::TestDataset::test_loc\", \"xarray/tests/test_dataset.py::TestDataset::test_selection_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_like\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_like\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_warning\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_variables_copied\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_method\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_fill_value[fill_value0]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_fill_value[2]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_fill_value[2.0]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_fill_value[fill_value3]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_like_fill_value[fill_value0]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_like_fill_value[2]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_like_fill_value[2.0]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_like_fill_value[fill_value3]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_str_dtype[str]\", \"xarray/tests/test_dataset.py::TestDataset::test_reindex_str_dtype[bytes]\", 
\"xarray/tests/test_dataset.py::TestDataset::test_align_fill_value[fill_value0]\", \"xarray/tests/test_dataset.py::TestDataset::test_align_fill_value[2]\", \"xarray/tests/test_dataset.py::TestDataset::test_align_fill_value[2.0]\", \"xarray/tests/test_dataset.py::TestDataset::test_align_fill_value[fill_value3]\", \"xarray/tests/test_dataset.py::TestDataset::test_align\", \"xarray/tests/test_dataset.py::TestDataset::test_align_exact\", \"xarray/tests/test_dataset.py::TestDataset::test_align_override\", \"xarray/tests/test_dataset.py::TestDataset::test_align_exclude\", \"xarray/tests/test_dataset.py::TestDataset::test_align_nocopy\", \"xarray/tests/test_dataset.py::TestDataset::test_align_indexes\", \"xarray/tests/test_dataset.py::TestDataset::test_align_non_unique\", \"xarray/tests/test_dataset.py::TestDataset::test_align_str_dtype\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_nocopy\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_exclude\", \"xarray/tests/test_dataset.py::TestDataset::test_broadcast_misaligned\", \"xarray/tests/test_dataset.py::TestDataset::test_variable_indexing\", \"xarray/tests/test_dataset.py::TestDataset::test_drop_variables\", \"xarray/tests/test_dataset.py::TestDataset::test_drop_index_labels\", \"xarray/tests/test_dataset.py::TestDataset::test_drop_labels_by_keyword\", \"xarray/tests/test_dataset.py::TestDataset::test_drop_labels_by_position\", \"xarray/tests/test_dataset.py::TestDataset::test_drop_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_copy\", \"xarray/tests/test_dataset.py::TestDataset::test_copy_with_data\", \"xarray/tests/test_dataset.py::TestDataset::test_copy_with_data_errors\", \"xarray/tests/test_dataset.py::TestDataset::test_rename\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_old_name\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_same_name\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_vars\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_does_not_change_CFTimeIndex_type\", \"xarray/tests/test_dataset.py::TestDataset::test_rename_does_not_change_DatetimeIndex_type\", \"xarray/tests/test_dataset.py::TestDataset::test_swap_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_error\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_int\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_existing_scalar_coord\", \"xarray/tests/test_dataset.py::TestDataset::test_isel_expand_dims_roundtrip\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_mixed_int_and_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_expand_dims_kwargs_python36plus\", \"xarray/tests/test_dataset.py::TestDataset::test_set_index\", \"xarray/tests/test_dataset.py::TestDataset::test_reset_index\", \"xarray/tests/test_dataset.py::TestDataset::test_reset_index_keep_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_reorder_levels\", \"xarray/tests/test_dataset.py::TestDataset::test_stack\", \"xarray/tests/test_dataset.py::TestDataset::test_unstack\", \"xarray/tests/test_dataset.py::TestDataset::test_unstack_errors\", \"xarray/tests/test_dataset.py::TestDataset::test_unstack_fill_value\", \"xarray/tests/test_dataset.py::TestDataset::test_unstack_sparse\", 
\"xarray/tests/test_dataset.py::TestDataset::test_stack_unstack_fast\", \"xarray/tests/test_dataset.py::TestDataset::test_stack_unstack_slow\", \"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_invalid_sample_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_name\", \"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_dtype_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_to_unstacked_dataset\", \"xarray/tests/test_dataset.py::TestDataset::test_to_stacked_array_to_unstacked_dataset_different_dimension\", \"xarray/tests/test_dataset.py::TestDataset::test_update\", \"xarray/tests/test_dataset.py::TestDataset::test_update_overwrite_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_update_auto_align\", \"xarray/tests/test_dataset.py::TestDataset::test_getitem\", \"xarray/tests/test_dataset.py::TestDataset::test_getitem_hashable\", \"xarray/tests/test_dataset.py::TestDataset::test_virtual_variables_default_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_virtual_variables_time\", \"xarray/tests/test_dataset.py::TestDataset::test_virtual_variable_same_name\", \"xarray/tests/test_dataset.py::TestDataset::test_virtual_variable_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_time_season\", \"xarray/tests/test_dataset.py::TestDataset::test_slice_virtual_variable\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_pandas\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_auto_align\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_dimension_override\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_with_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_align_new_indexes\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_str_dtype[str]\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_str_dtype[bytes]\", \"xarray/tests/test_dataset.py::TestDataset::test_assign\", \"xarray/tests/test_dataset.py::TestDataset::test_assign_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_assign_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_assign_multiindex_level\", \"xarray/tests/test_dataset.py::TestDataset::test_merge_multiindex_level\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_original_non_unique_index\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_both_non_unique_index\", \"xarray/tests/test_dataset.py::TestDataset::test_setitem_multiindex_level\", \"xarray/tests/test_dataset.py::TestDataset::test_delitem\", \"xarray/tests/test_dataset.py::TestDataset::test_squeeze\", \"xarray/tests/test_dataset.py::TestDataset::test_squeeze_drop\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_returns_new_type\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_iter\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_errors\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_reduce\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_math\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_math_virtual\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_nan\", \"xarray/tests/test_dataset.py::TestDataset::test_groupby_order\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_and_first\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_min_count\", 
\"xarray/tests/test_dataset.py::TestDataset::test_resample_by_mean_with_keep_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_loffset\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_by_mean_discarding_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_by_last_discarding_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_drop_nondim_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_old_api\", \"xarray/tests/test_dataset.py::TestDataset::test_resample_ds_da_are_the_same\", \"xarray/tests/test_dataset.py::TestDataset::test_ds_resample_apply_func_args\", \"xarray/tests/test_dataset.py::TestDataset::test_to_array\", \"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_dataframe\", \"xarray/tests/test_dataset.py::TestDataset::test_from_dataframe_sparse\", \"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_empty_dataframe\", \"xarray/tests/test_dataset.py::TestDataset::test_from_dataframe_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_from_dataframe_unsorted_levels\", \"xarray/tests/test_dataset.py::TestDataset::test_from_dataframe_non_unique_columns\", \"xarray/tests/test_dataset.py::TestDataset::test_convert_dataframe_with_many_types_and_multiindex\", \"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_dict\", \"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_dict_with_time_dim\", \"xarray/tests/test_dataset.py::TestDataset::test_to_and_from_dict_with_nan_nat\", \"xarray/tests/test_dataset.py::TestDataset::test_to_dict_with_numpy_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_pickle\", \"xarray/tests/test_dataset.py::TestDataset::test_lazy_load\", \"xarray/tests/test_dataset.py::TestDataset::test_dropna\", \"xarray/tests/test_dataset.py::TestDataset::test_fillna\", \"xarray/tests/test_dataset.py::TestDataset::test_propagate_attrs[0]\", \"xarray/tests/test_dataset.py::TestDataset::test_propagate_attrs[1]\", \"xarray/tests/test_dataset.py::TestDataset::test_propagate_attrs[absolute]\", \"xarray/tests/test_dataset.py::TestDataset::test_propagate_attrs[abs]\", \"xarray/tests/test_dataset.py::TestDataset::test_where\", \"xarray/tests/test_dataset.py::TestDataset::test_where_other\", \"xarray/tests/test_dataset.py::TestDataset::test_where_drop\", \"xarray/tests/test_dataset.py::TestDataset::test_where_drop_empty\", \"xarray/tests/test_dataset.py::TestDataset::test_where_drop_no_indexes\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_mean_uint_dtype\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_bad_dim\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_cumsum\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_cumsum_test_dims\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_non_numeric\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_strings\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_dtypes\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_keep_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_argmin\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_scalars\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_only_one_axis\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_no_axis\", \"xarray/tests/test_dataset.py::TestDataset::test_reduce_keepdims\", 
\"xarray/tests/test_dataset.py::TestDataset::test_quantile[0.25-True]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[0.25-False]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[q1-True]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[q1-False]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[q2-True]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile[q2-False]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile_skipna[True]\", \"xarray/tests/test_dataset.py::TestDataset::test_quantile_skipna[False]\", \"xarray/tests/test_dataset.py::TestDataset::test_rank\", \"xarray/tests/test_dataset.py::TestDataset::test_count\", \"xarray/tests/test_dataset.py::TestDataset::test_map\", \"xarray/tests/test_dataset.py::TestDataset::test_apply_pending_deprecated_map\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_number_math\", \"xarray/tests/test_dataset.py::TestDataset::test_unary_ops\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_array_math\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_dataset_math\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_math_auto_align\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_math_errors\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_transpose\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_ellipsis_transpose_different_ordered_vars\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_retains_period_index_on_transpose\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_n1_simple\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_n1_label\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_n1\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_n2\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_exception_n_neg\", \"xarray/tests/test_dataset.py::TestDataset::test_dataset_diff_exception_label_str\", \"xarray/tests/test_dataset.py::TestDataset::test_shift[fill_value0]\", \"xarray/tests/test_dataset.py::TestDataset::test_shift[2]\", \"xarray/tests/test_dataset.py::TestDataset::test_shift[2.0]\", \"xarray/tests/test_dataset.py::TestDataset::test_shift[fill_value3]\", \"xarray/tests/test_dataset.py::TestDataset::test_roll_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_roll_no_coords\", \"xarray/tests/test_dataset.py::TestDataset::test_roll_coords_none\", \"xarray/tests/test_dataset.py::TestDataset::test_roll_multidim\", \"xarray/tests/test_dataset.py::TestDataset::test_real_and_imag\", \"xarray/tests/test_dataset.py::TestDataset::test_setattr_raises\", \"xarray/tests/test_dataset.py::TestDataset::test_filter_by_attrs\", \"xarray/tests/test_dataset.py::TestDataset::test_binary_op_propagate_indexes\", \"xarray/tests/test_dataset.py::TestDataset::test_binary_op_join_setting\", \"xarray/tests/test_dataset.py::TestDataset::test_full_like\", \"xarray/tests/test_dataset.py::TestDataset::test_combine_first\", \"xarray/tests/test_dataset.py::TestDataset::test_sortby\", \"xarray/tests/test_dataset.py::TestDataset::test_attribute_access\", \"xarray/tests/test_dataset.py::TestDataset::test_ipython_key_completion\", \"xarray/tests/test_dataset.py::TestDataset::test_polyfit_output\", \"xarray/tests/test_dataset.py::TestDataset::test_pad\", \"xarray/tests/test_dataset.py::TestDataset::test_astype_attrs\", \"xarray/tests/test_dataset.py::test_isin[test_elements0]\", \"xarray/tests/test_dataset.py::test_isin[test_elements1]\", 
\"xarray/tests/test_dataset.py::test_isin[test_elements2]\", \"xarray/tests/test_dataset.py::test_isin_dask[test_elements0]\", \"xarray/tests/test_dataset.py::test_isin_dask[test_elements1]\", \"xarray/tests/test_dataset.py::test_isin_dask[test_elements2]\", \"xarray/tests/test_dataset.py::test_isin_dataset\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords0]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords1]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords2]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords3]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords4]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords5]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords6]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords7]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords8]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords0-unaligned_coords9]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords0]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords1]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords2]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords3]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords4]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords5]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords6]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords7]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords8]\", \"xarray/tests/test_dataset.py::test_dataset_constructor_aligns_to_explicit_coords[coords1-unaligned_coords9]\", \"xarray/tests/test_dataset.py::test_error_message_on_set_supplied\", \"xarray/tests/test_dataset.py::test_constructor_raises_with_invalid_coords[unaligned_coords0]\", \"xarray/tests/test_dataset.py::test_dir_expected_attrs[None]\", \"xarray/tests/test_dataset.py::test_dir_non_string[None]\", \"xarray/tests/test_dataset.py::test_dir_unicode[None]\", \"xarray/tests/test_dataset.py::test_coarsen_absent_dims_error[1]\", \"xarray/tests/test_dataset.py::test_coarsen[1-trim-left-True]\", \"xarray/tests/test_dataset.py::test_coarsen[1-trim-left-False]\", \"xarray/tests/test_dataset.py::test_coarsen[1-pad-right-True]\", \"xarray/tests/test_dataset.py::test_coarsen[1-pad-right-False]\", \"xarray/tests/test_dataset.py::test_coarsen_coords[1-True]\", \"xarray/tests/test_dataset.py::test_coarsen_coords[1-False]\", \"xarray/tests/test_dataset.py::test_coarsen_coords_cftime\", \"xarray/tests/test_dataset.py::test_coarsen_keep_attrs\", 
\"xarray/tests/test_dataset.py::test_rolling_keep_attrs[reduce-argument0]\", \"xarray/tests/test_dataset.py::test_rolling_keep_attrs[mean-argument1]\", \"xarray/tests/test_dataset.py::test_rolling_keep_attrs[construct-argument2]\", \"xarray/tests/test_dataset.py::test_rolling_keep_attrs[count-argument3]\", \"xarray/tests/test_dataset.py::test_rolling_keep_attrs_deprecated\", \"xarray/tests/test_dataset.py::test_rolling_properties[1]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-True-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-False-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-1-None-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-True-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-False-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-sum]\", 
\"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z1-None-None-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-True-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-False-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-1-None-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-True-median]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-False-median]\", 
\"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-sum]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-mean]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-std]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-var]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-min]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-max]\", \"xarray/tests/test_dataset.py::test_rolling_wrapped_bottleneck[1-z2-None-None-median]\", \"xarray/tests/test_dataset.py::test_rolling_exp[1]\", \"xarray/tests/test_dataset.py::test_rolling_exp_keep_attrs[1]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-None-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-None-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-1-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-1-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-2-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-2-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-3-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[1-3-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-None-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-None-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-1-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-1-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-2-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-2-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-3-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[2-3-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-None-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-None-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-1-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-1-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-2-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-2-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-3-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[3-3-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-None-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-None-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-1-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-1-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-2-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-2-False]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-3-True]\", \"xarray/tests/test_dataset.py::test_rolling_pandas_compat[4-3-False]\", \"xarray/tests/test_dataset.py::test_rolling_construct[1-True]\", \"xarray/tests/test_dataset.py::test_rolling_construct[1-False]\", \"xarray/tests/test_dataset.py::test_rolling_construct[2-True]\", \"xarray/tests/test_dataset.py::test_rolling_construct[2-False]\", \"xarray/tests/test_dataset.py::test_rolling_construct[3-True]\", \"xarray/tests/test_dataset.py::test_rolling_construct[3-False]\", \"xarray/tests/test_dataset.py::test_rolling_construct[4-True]\", 
\"xarray/tests/test_dataset.py::test_rolling_construct[4-False]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-3-3-False-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[sum-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-2-3-False-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[mean-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-2-3-False-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[std-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-None-True-1]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[var-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-None-True-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-None-False-1]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[min-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-None-False-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[max-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-1-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-None-False-2]\", 
\"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-2-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-3-3-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-None-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-None-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-None-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-None-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-1-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-1-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-1-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-1-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-2-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-2-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-2-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-2-False-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-3-True-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-3-True-2]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-3-False-1]\", \"xarray/tests/test_dataset.py::test_rolling_reduce[median-4-3-False-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[True-sum-None-True-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[True-sum-None-False-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[True-sum-1-True-2]\", 
\"xarray/tests/test_dataset.py::test_ndrolling_reduce[True-sum-1-False-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[True-max-None-True-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[True-max-None-False-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[True-max-1-True-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[True-max-1-False-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[False-sum-None-True-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[False-sum-None-False-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[False-sum-1-True-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[False-sum-1-False-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[False-max-None-True-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[False-max-None-False-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[False-max-1-True-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_reduce[False-max-1-False-2]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[True-nan-True]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[True-nan-False]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[True-nan-center2]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[True-0.0-True]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[True-0.0-False]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[True-0.0-center2]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[False-nan-True]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[False-nan-False]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[False-nan-center2]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[False-0.0-True]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[False-0.0-False]\", \"xarray/tests/test_dataset.py::test_ndrolling_construct[False-0.0-center2]\", \"xarray/tests/test_dataset.py::test_raise_no_warning_for_nan_in_binary_ops\", \"xarray/tests/test_dataset.py::test_raise_no_warning_assert_close[2]\", \"xarray/tests/test_dataset.py::test_differentiate[1-False]\", \"xarray/tests/test_dataset.py::test_differentiate[2-False]\", \"xarray/tests/test_dataset.py::test_differentiate_datetime[False]\", \"xarray/tests/test_dataset.py::test_differentiate_cftime[False]\", \"xarray/tests/test_dataset.py::test_integrate[True]\", \"xarray/tests/test_dataset.py::test_integrate[False]\", \"xarray/tests/test_dataset.py::test_trapz_datetime[np-True]\", \"xarray/tests/test_dataset.py::test_trapz_datetime[np-False]\", \"xarray/tests/test_dataset.py::test_trapz_datetime[cftime-True]\", \"xarray/tests/test_dataset.py::test_trapz_datetime[cftime-False]\", \"xarray/tests/test_dataset.py::test_no_dict\", \"xarray/tests/test_dataset.py::test_subclass_slots\", \"xarray/tests/test_dataset.py::test_weakref\", \"xarray/tests/test_dataset.py::test_deepcopy_obj_array\", \"xarray/tests/test_interp.py::test_keywargs\", \"xarray/tests/test_interp.py::test_interpolate_1d[0-x-linear]\", \"xarray/tests/test_interp.py::test_interpolate_1d[0-x-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_1d[0-y-linear]\", \"xarray/tests/test_interp.py::test_interpolate_1d[0-y-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_1d[1-x-linear]\", \"xarray/tests/test_interp.py::test_interpolate_1d[1-x-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_1d[1-y-linear]\", \"xarray/tests/test_interp.py::test_interpolate_1d[1-y-cubic]\", 
\"xarray/tests/test_interp.py::test_interpolate_1d_methods[cubic]\", \"xarray/tests/test_interp.py::test_interpolate_1d_methods[zero]\", \"xarray/tests/test_interp.py::test_interpolate_vectorize[False]\", \"xarray/tests/test_interp.py::test_interpolate_vectorize[True]\", \"xarray/tests/test_interp.py::test_interpolate_nd[3]\", \"xarray/tests/test_interp.py::test_interpolate_nd[4]\", \"xarray/tests/test_interp.py::test_interpolate_nd_nd\", \"xarray/tests/test_interp.py::test_interpolate_nd_with_nan\", \"xarray/tests/test_interp.py::test_interpolate_scalar[0-linear]\", \"xarray/tests/test_interp.py::test_interpolate_scalar[1-linear]\", \"xarray/tests/test_interp.py::test_interpolate_nd_scalar[3-linear]\", \"xarray/tests/test_interp.py::test_interpolate_nd_scalar[4-linear]\", \"xarray/tests/test_interp.py::test_nans[True]\", \"xarray/tests/test_interp.py::test_nans[False]\", \"xarray/tests/test_interp.py::test_errors[True]\", \"xarray/tests/test_interp.py::test_errors[False]\", \"xarray/tests/test_interp.py::test_dtype\", \"xarray/tests/test_interp.py::test_sorted\", \"xarray/tests/test_interp.py::test_dimension_wo_coords\", \"xarray/tests/test_interp.py::test_dataset\", \"xarray/tests/test_interp.py::test_interpolate_dimorder[0]\", \"xarray/tests/test_interp.py::test_interpolate_dimorder[3]\", \"xarray/tests/test_interp.py::test_interp_like\", \"xarray/tests/test_interp.py::test_datetime[x_new0-expected0]\", \"xarray/tests/test_interp.py::test_datetime[x_new1-expected1]\", \"xarray/tests/test_interp.py::test_datetime[x_new2-expected2]\", \"xarray/tests/test_interp.py::test_datetime[x_new3-expected3]\", \"xarray/tests/test_interp.py::test_datetime[x_new4-0.5]\", \"xarray/tests/test_interp.py::test_datetime_single_string\", \"xarray/tests/test_interp.py::test_cftime\", \"xarray/tests/test_interp.py::test_cftime_type_error\", \"xarray/tests/test_interp.py::test_cftime_list_of_strings\", \"xarray/tests/test_interp.py::test_cftime_single_string\", \"xarray/tests/test_interp.py::test_datetime_to_non_datetime_error\", \"xarray/tests/test_interp.py::test_cftime_to_non_cftime_error\", \"xarray/tests/test_interp.py::test_datetime_interp_noerror\", \"xarray/tests/test_interp.py::test_3641\", \"xarray/tests/test_interp.py::test_decompose[nearest]\", \"xarray/tests/test_interp.py::test_decompose[linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-0-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-True-zero]\", 
\"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[1-1-1-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-0-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-1-1-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-False-quadratic]\", 
\"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-0-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-1-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[2-2-2-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-0-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-False-linear]\", 
\"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-1-1-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-0-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-1-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-2-2-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-True-zero]\", 
\"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-0-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-1-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-False-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-2-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-True-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-True-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-True-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-True-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-True-quadratic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-True-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-False-linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-False-nearest]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-False-zero]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-False-slinear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-False-quadratic]\", 
\"xarray/tests/test_interp.py::test_interpolate_chunk_1d[3-3-3-False-cubic]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_advanced[linear]\", \"xarray/tests/test_interp.py::test_interpolate_chunk_advanced[nearest]\", \"xarray/tests/test_variable.py::TestVariable::test_properties\", \"xarray/tests/test_variable.py::TestVariable::test_attrs\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_dict\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_1d\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_1d_fancy\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_with_mask\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_with_mask_size_zero\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_with_mask_nd_indexer\", \"xarray/tests/test_variable.py::TestVariable::test_index_0d_int\", \"xarray/tests/test_variable.py::TestVariable::test_index_0d_float\", \"xarray/tests/test_variable.py::TestVariable::test_index_0d_string\", \"xarray/tests/test_variable.py::TestVariable::test_index_0d_datetime\", \"xarray/tests/test_variable.py::TestVariable::test_index_0d_timedelta64\", \"xarray/tests/test_variable.py::TestVariable::test_index_0d_not_a_time\", \"xarray/tests/test_variable.py::TestVariable::test_index_0d_object\", \"xarray/tests/test_variable.py::TestVariable::test_0d_object_array_with_list\", \"xarray/tests/test_variable.py::TestVariable::test_index_and_concat_datetime\", \"xarray/tests/test_variable.py::TestVariable::test_0d_time_data\", \"xarray/tests/test_variable.py::TestVariable::test_datetime64_conversion\", \"xarray/tests/test_variable.py::TestVariable::test_timedelta64_conversion\", \"xarray/tests/test_variable.py::TestVariable::test_object_conversion\", \"xarray/tests/test_variable.py::TestVariable::test_datetime64_valid_range\", \"xarray/tests/test_variable.py::TestVariable::test_pandas_data\", \"xarray/tests/test_variable.py::TestVariable::test_pandas_period_index\", \"xarray/tests/test_variable.py::TestVariable::test_1d_math\", \"xarray/tests/test_variable.py::TestVariable::test_1d_reduce\", \"xarray/tests/test_variable.py::TestVariable::test_array_interface\", \"xarray/tests/test_variable.py::TestVariable::test___array__\", \"xarray/tests/test_variable.py::TestVariable::test_equals_all_dtypes\", \"xarray/tests/test_variable.py::TestVariable::test_eq_all_dtypes\", \"xarray/tests/test_variable.py::TestVariable::test_encoding_preserved\", \"xarray/tests/test_variable.py::TestVariable::test_concat\", \"xarray/tests/test_variable.py::TestVariable::test_concat_attrs\", \"xarray/tests/test_variable.py::TestVariable::test_concat_fixed_len_str\", \"xarray/tests/test_variable.py::TestVariable::test_concat_number_strings\", \"xarray/tests/test_variable.py::TestVariable::test_concat_mixed_dtypes\", \"xarray/tests/test_variable.py::TestVariable::test_copy[float-True]\", \"xarray/tests/test_variable.py::TestVariable::test_copy[float-False]\", \"xarray/tests/test_variable.py::TestVariable::test_copy[int-True]\", \"xarray/tests/test_variable.py::TestVariable::test_copy[int-False]\", \"xarray/tests/test_variable.py::TestVariable::test_copy[str-True]\", \"xarray/tests/test_variable.py::TestVariable::test_copy[str-False]\", \"xarray/tests/test_variable.py::TestVariable::test_copy_index\", \"xarray/tests/test_variable.py::TestVariable::test_copy_with_data\", \"xarray/tests/test_variable.py::TestVariable::test_copy_with_data_errors\", \"xarray/tests/test_variable.py::TestVariable::test_copy_index_with_data\", 
\"xarray/tests/test_variable.py::TestVariable::test_copy_index_with_data_errors\", \"xarray/tests/test_variable.py::TestVariable::test_replace\", \"xarray/tests/test_variable.py::TestVariable::test_real_and_imag\", \"xarray/tests/test_variable.py::TestVariable::test_aggregate_complex\", \"xarray/tests/test_variable.py::TestVariable::test_pandas_cateogrical_dtype\", \"xarray/tests/test_variable.py::TestVariable::test_pandas_datetime64_with_tz\", \"xarray/tests/test_variable.py::TestVariable::test_multiindex\", \"xarray/tests/test_variable.py::TestVariable::test_load\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_advanced\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_uint_1d\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_uint\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_0d_array\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_fancy\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_error\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg0-np_arg0-mean]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg0-np_arg0-edge]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg0-np_arg0-maximum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg0-np_arg0-minimum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg0-np_arg0-symmetric]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg0-np_arg0-wrap]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg1-np_arg1-mean]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg1-np_arg1-edge]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg1-np_arg1-maximum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg1-np_arg1-minimum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg1-np_arg1-symmetric]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg1-np_arg1-wrap]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg2-np_arg2-mean]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg2-np_arg2-edge]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg2-np_arg2-maximum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg2-np_arg2-minimum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg2-np_arg2-symmetric]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg2-np_arg2-wrap]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg3-np_arg3-mean]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg3-np_arg3-edge]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg3-np_arg3-maximum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg3-np_arg3-minimum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg3-np_arg3-symmetric]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg3-np_arg3-wrap]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg4-np_arg4-mean]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg4-np_arg4-edge]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg4-np_arg4-maximum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg4-np_arg4-minimum]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg4-np_arg4-symmetric]\", \"xarray/tests/test_variable.py::TestVariable::test_pad[xr_arg4-np_arg4-wrap]\", 
\"xarray/tests/test_variable.py::TestVariable::test_pad_constant_values[xr_arg0-np_arg0]\", \"xarray/tests/test_variable.py::TestVariable::test_pad_constant_values[xr_arg1-np_arg1]\", \"xarray/tests/test_variable.py::TestVariable::test_pad_constant_values[xr_arg2-np_arg2]\", \"xarray/tests/test_variable.py::TestVariable::test_pad_constant_values[xr_arg3-np_arg3]\", \"xarray/tests/test_variable.py::TestVariable::test_pad_constant_values[xr_arg4-np_arg4]\", \"xarray/tests/test_variable.py::TestVariable::test_rolling_window\", \"xarray/tests/test_variable.py::TestVariable::test_data_and_values\", \"xarray/tests/test_variable.py::TestVariable::test_numpy_same_methods\", \"xarray/tests/test_variable.py::TestVariable::test_datetime64_conversion_scalar\", \"xarray/tests/test_variable.py::TestVariable::test_timedelta64_conversion_scalar\", \"xarray/tests/test_variable.py::TestVariable::test_0d_str\", \"xarray/tests/test_variable.py::TestVariable::test_0d_datetime\", \"xarray/tests/test_variable.py::TestVariable::test_0d_timedelta\", \"xarray/tests/test_variable.py::TestVariable::test_equals_and_identical\", \"xarray/tests/test_variable.py::TestVariable::test_broadcast_equals\", \"xarray/tests/test_variable.py::TestVariable::test_no_conflicts\", \"xarray/tests/test_variable.py::TestVariable::test_repr\", \"xarray/tests/test_variable.py::TestVariable::test_repr_lazy_data\", \"xarray/tests/test_variable.py::TestVariable::test_detect_indexer_type\", \"xarray/tests/test_variable.py::TestVariable::test_indexer_type\", \"xarray/tests/test_variable.py::TestVariable::test_items\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_basic\", \"xarray/tests/test_variable.py::TestVariable::test_getitem_with_mask_2d_input\", \"xarray/tests/test_variable.py::TestVariable::test_isel\", \"xarray/tests/test_variable.py::TestVariable::test_index_0d_numpy_string\", \"xarray/tests/test_variable.py::TestVariable::test_indexing_0d_unicode\", \"xarray/tests/test_variable.py::TestVariable::test_shift[fill_value0]\", \"xarray/tests/test_variable.py::TestVariable::test_shift[2]\", \"xarray/tests/test_variable.py::TestVariable::test_shift[2.0]\", \"xarray/tests/test_variable.py::TestVariable::test_shift2d\", \"xarray/tests/test_variable.py::TestVariable::test_roll\", \"xarray/tests/test_variable.py::TestVariable::test_roll_consistency\", \"xarray/tests/test_variable.py::TestVariable::test_transpose\", \"xarray/tests/test_variable.py::TestVariable::test_transpose_0d\", \"xarray/tests/test_variable.py::TestVariable::test_squeeze\", \"xarray/tests/test_variable.py::TestVariable::test_get_axis_num\", \"xarray/tests/test_variable.py::TestVariable::test_set_dims\", \"xarray/tests/test_variable.py::TestVariable::test_set_dims_object_dtype\", \"xarray/tests/test_variable.py::TestVariable::test_stack\", \"xarray/tests/test_variable.py::TestVariable::test_stack_errors\", \"xarray/tests/test_variable.py::TestVariable::test_unstack\", \"xarray/tests/test_variable.py::TestVariable::test_unstack_errors\", \"xarray/tests/test_variable.py::TestVariable::test_unstack_2d\", \"xarray/tests/test_variable.py::TestVariable::test_stack_unstack_consistency\", \"xarray/tests/test_variable.py::TestVariable::test_broadcasting_math\", \"xarray/tests/test_variable.py::TestVariable::test_broadcasting_failures\", \"xarray/tests/test_variable.py::TestVariable::test_inplace_math\", \"xarray/tests/test_variable.py::TestVariable::test_reduce\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[None-None-0.25-True]\", 
\"xarray/tests/test_variable.py::TestVariable::test_quantile[None-None-0.25-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[None-None-q1-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[None-None-q1-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[None-None-q2-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[None-None-q2-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[0-x-0.25-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[0-x-0.25-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[0-x-q1-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[0-x-q1-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[0-x-q2-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[0-x-q2-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis2-dim2-0.25-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis2-dim2-0.25-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis2-dim2-q1-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis2-dim2-q1-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis2-dim2-q2-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis2-dim2-q2-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis3-dim3-0.25-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis3-dim3-0.25-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis3-dim3-q1-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis3-dim3-q1-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis3-dim3-q2-True]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile[axis3-dim3-q2-False]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_dask[1-y-0.25]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_dask[1-y-q1]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_dask[1-y-q2]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_dask[axis1-dim1-0.25]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_dask[axis1-dim1-q1]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_dask[axis1-dim1-q2]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_chunked_dim_error\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_out_of_bounds[-0.1]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_out_of_bounds[1.1]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_out_of_bounds[q2]\", \"xarray/tests/test_variable.py::TestVariable::test_quantile_out_of_bounds[q3]\", \"xarray/tests/test_variable.py::TestVariable::test_rank_dask_raises\", \"xarray/tests/test_variable.py::TestVariable::test_rank\", \"xarray/tests/test_variable.py::TestVariable::test_big_endian_reduce\", \"xarray/tests/test_variable.py::TestVariable::test_reduce_funcs\", \"xarray/tests/test_variable.py::TestVariable::test_reduce_keepdims\", \"xarray/tests/test_variable.py::TestVariable::test_reduce_keepdims_dask\", \"xarray/tests/test_variable.py::TestVariable::test_reduce_keep_attrs\", \"xarray/tests/test_variable.py::TestVariable::test_binary_ops_keep_attrs\", \"xarray/tests/test_variable.py::TestVariable::test_count\", \"xarray/tests/test_variable.py::TestVariable::test_setitem\", 
\"xarray/tests/test_variable.py::TestVariable::test_setitem_fancy\", \"xarray/tests/test_variable.py::TestVariable::test_coarsen\", \"xarray/tests/test_variable.py::TestVariable::test_coarsen_2d\", \"xarray/tests/test_variable.py::TestVariable::test_coarsen_keep_attrs\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_properties\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_attrs\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_dict\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_1d\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_with_mask\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_with_mask_size_zero\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_index_0d_int\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_index_0d_float\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_index_0d_string\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_index_0d_datetime\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_index_0d_timedelta64\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_index_0d_not_a_time\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_index_0d_object\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_index_and_concat_datetime\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_0d_time_data\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_datetime64_conversion\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_timedelta64_conversion\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_object_conversion\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_datetime64_valid_range\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pandas_data\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pandas_period_index\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_1d_math\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_1d_reduce\", \"xarray/tests/test_variable.py::TestVariableWithDask::test___array__\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_equals_all_dtypes\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_encoding_preserved\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_concat\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_concat_attrs\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_concat_fixed_len_str\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_concat_number_strings\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_concat_mixed_dtypes\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy[float-True]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy[float-False]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy[int-True]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy[int-False]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy[str-True]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy[str-False]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy_with_data\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy_with_data_errors\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy_index_with_data\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_copy_index_with_data_errors\", 
\"xarray/tests/test_variable.py::TestVariableWithDask::test_replace\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_real_and_imag\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_aggregate_complex\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pandas_cateogrical_dtype\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pandas_datetime64_with_tz\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_multiindex\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_load\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_advanced\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_uint_1d\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_uint\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_0d_array\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_error\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg0-np_arg0-mean]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg0-np_arg0-edge]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg0-np_arg0-maximum]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg0-np_arg0-minimum]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg0-np_arg0-symmetric]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg0-np_arg0-wrap]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg1-np_arg1-mean]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg1-np_arg1-edge]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg1-np_arg1-maximum]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg1-np_arg1-minimum]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg1-np_arg1-symmetric]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg1-np_arg1-wrap]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg2-np_arg2-mean]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg2-np_arg2-edge]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg2-np_arg2-maximum]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg2-np_arg2-minimum]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg2-np_arg2-symmetric]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg2-np_arg2-wrap]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg3-np_arg3-mean]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg3-np_arg3-edge]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg3-np_arg3-maximum]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg3-np_arg3-minimum]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg3-np_arg3-symmetric]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg3-np_arg3-wrap]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg4-np_arg4-mean]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg4-np_arg4-edge]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg4-np_arg4-maximum]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg4-np_arg4-minimum]\", 
\"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg4-np_arg4-symmetric]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad[xr_arg4-np_arg4-wrap]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad_constant_values[xr_arg0-np_arg0]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad_constant_values[xr_arg1-np_arg1]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad_constant_values[xr_arg2-np_arg2]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad_constant_values[xr_arg3-np_arg3]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_pad_constant_values[xr_arg4-np_arg4]\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_rolling_window\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_fancy\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_1d_fancy\", \"xarray/tests/test_variable.py::TestVariableWithDask::test_getitem_with_mask_nd_indexer\", \"xarray/tests/test_variable.py::TestVariableWithSparse::test_as_sparse\", \"xarray/tests/test_variable.py::TestIndexVariable::test_properties\", \"xarray/tests/test_variable.py::TestIndexVariable::test_attrs\", \"xarray/tests/test_variable.py::TestIndexVariable::test_getitem_dict\", \"xarray/tests/test_variable.py::TestIndexVariable::test_getitem_1d\", \"xarray/tests/test_variable.py::TestIndexVariable::test_getitem_1d_fancy\", \"xarray/tests/test_variable.py::TestIndexVariable::test_getitem_with_mask\", \"xarray/tests/test_variable.py::TestIndexVariable::test_getitem_with_mask_size_zero\", \"xarray/tests/test_variable.py::TestIndexVariable::test_getitem_with_mask_nd_indexer\", \"xarray/tests/test_variable.py::TestIndexVariable::test_index_0d_int\", \"xarray/tests/test_variable.py::TestIndexVariable::test_index_0d_float\", \"xarray/tests/test_variable.py::TestIndexVariable::test_index_0d_string\", \"xarray/tests/test_variable.py::TestIndexVariable::test_index_0d_datetime\", \"xarray/tests/test_variable.py::TestIndexVariable::test_index_0d_timedelta64\", \"xarray/tests/test_variable.py::TestIndexVariable::test_index_0d_not_a_time\", \"xarray/tests/test_variable.py::TestIndexVariable::test_index_0d_object\", \"xarray/tests/test_variable.py::TestIndexVariable::test_0d_object_array_with_list\", \"xarray/tests/test_variable.py::TestIndexVariable::test_index_and_concat_datetime\", \"xarray/tests/test_variable.py::TestIndexVariable::test_0d_time_data\", \"xarray/tests/test_variable.py::TestIndexVariable::test_datetime64_conversion\", \"xarray/tests/test_variable.py::TestIndexVariable::test_timedelta64_conversion\", \"xarray/tests/test_variable.py::TestIndexVariable::test_object_conversion\", \"xarray/tests/test_variable.py::TestIndexVariable::test_datetime64_valid_range\", \"xarray/tests/test_variable.py::TestIndexVariable::test_pandas_data\", \"xarray/tests/test_variable.py::TestIndexVariable::test_pandas_period_index\", \"xarray/tests/test_variable.py::TestIndexVariable::test_1d_math\", \"xarray/tests/test_variable.py::TestIndexVariable::test_1d_reduce\", \"xarray/tests/test_variable.py::TestIndexVariable::test_array_interface\", \"xarray/tests/test_variable.py::TestIndexVariable::test___array__\", \"xarray/tests/test_variable.py::TestIndexVariable::test_equals_all_dtypes\", \"xarray/tests/test_variable.py::TestIndexVariable::test_eq_all_dtypes\", \"xarray/tests/test_variable.py::TestIndexVariable::test_encoding_preserved\", \"xarray/tests/test_variable.py::TestIndexVariable::test_concat\", 
\"xarray/tests/test_variable.py::TestIndexVariable::test_concat_attrs\", \"xarray/tests/test_variable.py::TestIndexVariable::test_concat_fixed_len_str\", \"xarray/tests/test_variable.py::TestIndexVariable::test_concat_number_strings\", \"xarray/tests/test_variable.py::TestIndexVariable::test_concat_mixed_dtypes\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy[float-True]\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy[float-False]\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy[int-True]\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy[int-False]\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy[str-True]\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy[str-False]\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy_index\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy_with_data\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy_with_data_errors\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy_index_with_data\", \"xarray/tests/test_variable.py::TestIndexVariable::test_copy_index_with_data_errors\", \"xarray/tests/test_variable.py::TestIndexVariable::test_replace\", \"xarray/tests/test_variable.py::TestIndexVariable::test_real_and_imag\", \"xarray/tests/test_variable.py::TestIndexVariable::test_aggregate_complex\", \"xarray/tests/test_variable.py::TestIndexVariable::test_pandas_cateogrical_dtype\", \"xarray/tests/test_variable.py::TestIndexVariable::test_pandas_datetime64_with_tz\", \"xarray/tests/test_variable.py::TestIndexVariable::test_multiindex\", \"xarray/tests/test_variable.py::TestIndexVariable::test_load\", \"xarray/tests/test_variable.py::TestIndexVariable::test_getitem_uint_1d\", \"xarray/tests/test_variable.py::TestIndexVariable::test_getitem_0d_array\", \"xarray/tests/test_variable.py::TestIndexVariable::test_init\", \"xarray/tests/test_variable.py::TestIndexVariable::test_to_index\", \"xarray/tests/test_variable.py::TestIndexVariable::test_multiindex_default_level_names\", \"xarray/tests/test_variable.py::TestIndexVariable::test_data\", \"xarray/tests/test_variable.py::TestIndexVariable::test_name\", \"xarray/tests/test_variable.py::TestIndexVariable::test_level_names\", \"xarray/tests/test_variable.py::TestIndexVariable::test_get_level_variable\", \"xarray/tests/test_variable.py::TestIndexVariable::test_concat_periods\", \"xarray/tests/test_variable.py::TestIndexVariable::test_concat_multiindex\", \"xarray/tests/test_variable.py::TestIndexVariable::test_concat_str_dtype[str]\", \"xarray/tests/test_variable.py::TestIndexVariable::test_concat_str_dtype[bytes]\", \"xarray/tests/test_variable.py::TestIndexVariable::test_coordinate_alias\", \"xarray/tests/test_variable.py::TestIndexVariable::test_datetime64\", \"xarray/tests/test_variable.py::TestAsCompatibleData::test_unchanged_types\", \"xarray/tests/test_variable.py::TestAsCompatibleData::test_converted_types\", \"xarray/tests/test_variable.py::TestAsCompatibleData::test_masked_array\", \"xarray/tests/test_variable.py::TestAsCompatibleData::test_datetime\", \"xarray/tests/test_variable.py::TestAsCompatibleData::test_full_like\", \"xarray/tests/test_variable.py::TestAsCompatibleData::test_full_like_dask\", \"xarray/tests/test_variable.py::TestAsCompatibleData::test_zeros_like\", \"xarray/tests/test_variable.py::TestAsCompatibleData::test_ones_like\", \"xarray/tests/test_variable.py::TestAsCompatibleData::test_unsupported_type\", 
\"xarray/tests/test_variable.py::test_raise_no_warning_for_nan_in_binary_ops\", \"xarray/tests/test_variable.py::TestBackendIndexing::test_NumpyIndexingAdapter\", \"xarray/tests/test_variable.py::TestBackendIndexing::test_LazilyOuterIndexedArray\", \"xarray/tests/test_variable.py::TestBackendIndexing::test_CopyOnWriteArray\", \"xarray/tests/test_variable.py::TestBackendIndexing::test_MemoryCachedArray\", \"xarray/tests/test_variable.py::TestBackendIndexing::test_DaskIndexingAdapter\"]", + "expected_spans": { + "xarray/core/variable.py": [ + "as_variable" + ] + }, + "test_file_spans": { + "xarray/tests/test_dask.py": [ + "test_map_blocks_da_transformations" + ], + "xarray/tests/test_dataset.py": [ + "TestDataset.test_reduce_keepdims" + ], + "xarray/tests/test_interp.py": [ + "test_interpolate_vectorize" + ], + "xarray/tests/test_variable.py": [ + "imports", + "TestVariable.test_repr" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13551", + "repo": "django/django", + "base_commit": "7f9e4524d6b23424cf44fbe1bf1f4e70f6bb066e", + "problem_statement": "Changing user's email could invalidate password reset tokens\nDescription\n\t\nSequence:\nHave account with email address foo@\u2026\nPassword reset request for that email (unused)\nfoo@\u2026 account changes their email address\nPassword reset email is used\nThe password reset email's token should be rejected at that point, but in fact it is allowed.\nThe fix is to add the user's email address into \u200bPasswordResetTokenGenerator._make_hash_value()\nNothing forces a user to even have an email as per AbstractBaseUser. Perhaps the token generation method could be factored out onto the model, ala get_session_auth_hash().\n", + "golden_patch": "diff --git a/django/contrib/auth/tokens.py b/django/contrib/auth/tokens.py\n--- a/django/contrib/auth/tokens.py\n+++ b/django/contrib/auth/tokens.py\n@@ -78,9 +78,9 @@ def _make_token_with_timestamp(self, user, timestamp, legacy=False):\n \n def _make_hash_value(self, user, timestamp):\n \"\"\"\n- Hash the user's primary key and some user state that's sure to change\n- after a password reset to produce a token that invalidated when it's\n- used:\n+ Hash the user's primary key, email (if available), and some user state\n+ that's sure to change after a password reset to produce a token that is\n+ invalidated when it's used:\n 1. The password field will change upon a password reset (even if the\n same password is chosen, due to password salting).\n 2. 
The last_login field will usually be updated very shortly after\n@@ -94,7 +94,9 @@ def _make_hash_value(self, user, timestamp):\n # Truncate microseconds so that tokens are consistent even if the\n # database doesn't support microseconds.\n login_timestamp = '' if user.last_login is None else user.last_login.replace(microsecond=0, tzinfo=None)\n- return str(user.pk) + user.password + str(login_timestamp) + str(timestamp)\n+ email_field = user.get_email_field_name()\n+ email = getattr(user, email_field, '') or ''\n+ return f'{user.pk}{user.password}{login_timestamp}{timestamp}{email}'\n \n def _num_seconds(self, dt):\n return int((dt - datetime(2001, 1, 1)).total_seconds())\n", + "test_patch": "diff --git a/tests/auth_tests/models/__init__.py b/tests/auth_tests/models/__init__.py\n--- a/tests/auth_tests/models/__init__.py\n+++ b/tests/auth_tests/models/__init__.py\n@@ -8,6 +8,7 @@\n from .no_password import NoPasswordUser\n from .proxy import Proxy, UserProxy\n from .uuid_pk import UUIDUser\n+from .with_custom_email_field import CustomEmailField\n from .with_foreign_key import CustomUserWithFK, Email\n from .with_integer_username import IntegerUsernameUser\n from .with_last_login_attr import UserWithDisabledLastLoginField\n@@ -16,10 +17,10 @@\n )\n \n __all__ = (\n- 'CustomPermissionsUser', 'CustomUser', 'CustomUserNonUniqueUsername',\n- 'CustomUserWithFK', 'CustomUserWithM2M', 'CustomUserWithM2MThrough',\n- 'CustomUserWithoutIsActiveField', 'Email', 'ExtensionUser',\n- 'IntegerUsernameUser', 'IsActiveTestUser1', 'MinimalUser',\n+ 'CustomEmailField', 'CustomPermissionsUser', 'CustomUser',\n+ 'CustomUserNonUniqueUsername', 'CustomUserWithFK', 'CustomUserWithM2M',\n+ 'CustomUserWithM2MThrough', 'CustomUserWithoutIsActiveField', 'Email',\n+ 'ExtensionUser', 'IntegerUsernameUser', 'IsActiveTestUser1', 'MinimalUser',\n 'NoPasswordUser', 'Organization', 'Proxy', 'UUIDUser', 'UserProxy',\n 'UserWithDisabledLastLoginField',\n )\ndiff --git a/tests/auth_tests/models/with_custom_email_field.py b/tests/auth_tests/models/with_custom_email_field.py\n--- a/tests/auth_tests/models/with_custom_email_field.py\n+++ b/tests/auth_tests/models/with_custom_email_field.py\n@@ -15,7 +15,7 @@ def create_user(self, username, password, email):\n class CustomEmailField(AbstractBaseUser):\n username = models.CharField(max_length=255)\n password = models.CharField(max_length=255)\n- email_address = models.EmailField()\n+ email_address = models.EmailField(null=True)\n is_active = models.BooleanField(default=True)\n \n EMAIL_FIELD = 'email_address'\ndiff --git a/tests/auth_tests/test_models.py b/tests/auth_tests/test_models.py\n--- a/tests/auth_tests/test_models.py\n+++ b/tests/auth_tests/test_models.py\n@@ -17,8 +17,7 @@\n SimpleTestCase, TestCase, TransactionTestCase, override_settings,\n )\n \n-from .models import IntegerUsernameUser\n-from .models.with_custom_email_field import CustomEmailField\n+from .models import CustomEmailField, IntegerUsernameUser\n \n \n class NaturalKeysTestCase(TestCase):\ndiff --git a/tests/auth_tests/test_tokens.py b/tests/auth_tests/test_tokens.py\n--- a/tests/auth_tests/test_tokens.py\n+++ b/tests/auth_tests/test_tokens.py\n@@ -7,6 +7,8 @@\n from django.test.utils import ignore_warnings\n from django.utils.deprecation import RemovedInDjango40Warning\n \n+from .models import CustomEmailField\n+\n \n class MockedPasswordResetTokenGenerator(PasswordResetTokenGenerator):\n def __init__(self, now):\n@@ -37,6 +39,27 @@ def test_10265(self):\n tk2 = p0.make_token(user_reload)\n 
self.assertEqual(tk1, tk2)\n \n+ def test_token_with_different_email(self):\n+ \"\"\"Updating the user email address invalidates the token.\"\"\"\n+ tests = [\n+ (CustomEmailField, None),\n+ (CustomEmailField, 'test4@example.com'),\n+ (User, 'test4@example.com'),\n+ ]\n+ for model, email in tests:\n+ with self.subTest(model=model.__qualname__, email=email):\n+ user = model.objects.create_user(\n+ 'changeemailuser',\n+ email=email,\n+ password='testpw',\n+ )\n+ p0 = PasswordResetTokenGenerator()\n+ tk1 = p0.make_token(user)\n+ self.assertIs(p0.check_token(user, tk1), True)\n+ setattr(user, user.get_email_field_name(), 'test4new@example.com')\n+ user.save()\n+ self.assertIs(p0.check_token(user, tk1), False)\n+\n def test_timeout(self):\n \"\"\"The token is valid after n seconds, but no greater.\"\"\"\n # Uses a mocked version of PasswordResetTokenGenerator so we can change\n", + "fail_to_pass": "[\"Updating the user email address invalidates the token.\", \"test_token_with_different_secret (auth_tests.test_tokens.TokenGeneratorTest)\"]", + "pass_to_pass": "[\"test_str (auth_tests.test_models.GroupTests)\", \"test_group_natural_key (auth_tests.test_models.NaturalKeysTestCase)\", \"test_user_natural_key (auth_tests.test_models.NaturalKeysTestCase)\", \"test_check_password (auth_tests.test_models.AnonymousUserTests)\", \"test_delete (auth_tests.test_models.AnonymousUserTests)\", \"test_eq (auth_tests.test_models.AnonymousUserTests)\", \"test_hash (auth_tests.test_models.AnonymousUserTests)\", \"test_int (auth_tests.test_models.AnonymousUserTests)\", \"test_properties (auth_tests.test_models.AnonymousUserTests)\", \"test_save (auth_tests.test_models.AnonymousUserTests)\", \"test_set_password (auth_tests.test_models.AnonymousUserTests)\", \"test_str (auth_tests.test_models.AnonymousUserTests)\", \"test_create_superuser (auth_tests.test_models.TestCreateSuperUserSignals)\", \"test_create_user (auth_tests.test_models.TestCreateSuperUserSignals)\", \"test_str (auth_tests.test_models.PermissionTests)\", \"test_load_data_with_user_permissions (auth_tests.test_models.LoadDataWithNaturalKeysAndMultipleDatabasesTestCase)\", \"test_10265 (auth_tests.test_tokens.TokenGeneratorTest)\", \"test_check_token_with_nonexistent_token_and_user (auth_tests.test_tokens.TokenGeneratorTest)\", \"test_legacy_token_validation (auth_tests.test_tokens.TokenGeneratorTest)\", \"test_make_token (auth_tests.test_tokens.TokenGeneratorTest)\", \"The token is valid after n seconds, but no greater.\", \"test_token_default_hashing_algorithm (auth_tests.test_tokens.TokenGeneratorTest)\", \"test_user_is_created_and_added_to_group (auth_tests.test_models.LoadDataWithNaturalKeysTestCase)\", \"test_user_is_created_and_added_to_group (auth_tests.test_models.LoadDataWithoutNaturalKeysTestCase)\", \"test_backend_without_with_perm (auth_tests.test_models.UserWithPermTestCase)\", \"test_basic (auth_tests.test_models.UserWithPermTestCase)\", \"test_custom_backend (auth_tests.test_models.UserWithPermTestCase)\", \"test_custom_backend_pass_obj (auth_tests.test_models.UserWithPermTestCase)\", \"test_invalid_backend_type (auth_tests.test_models.UserWithPermTestCase)\", \"test_invalid_permission_name (auth_tests.test_models.UserWithPermTestCase)\", \"test_invalid_permission_type (auth_tests.test_models.UserWithPermTestCase)\", \"test_multiple_backends (auth_tests.test_models.UserWithPermTestCase)\", \"test_nonexistent_backend (auth_tests.test_models.UserWithPermTestCase)\", \"test_nonexistent_permission 
(auth_tests.test_models.UserWithPermTestCase)\", \"test_clean_normalize_username (auth_tests.test_models.AbstractBaseUserTests)\", \"test_custom_email (auth_tests.test_models.AbstractBaseUserTests)\", \"test_default_email (auth_tests.test_models.AbstractBaseUserTests)\", \"test_has_usable_password (auth_tests.test_models.AbstractBaseUserTests)\", \"test_normalize_username (auth_tests.test_models.AbstractBaseUserTests)\", \"test_builtin_user_isactive (auth_tests.test_models.IsActiveTestCase)\", \"test_is_active_field_default (auth_tests.test_models.IsActiveTestCase)\", \"test_check_password_upgrade (auth_tests.test_models.AbstractUserTestCase)\", \"test_email_user (auth_tests.test_models.AbstractUserTestCase)\", \"test_last_login_default (auth_tests.test_models.AbstractUserTestCase)\", \"test_user_clean_normalize_email (auth_tests.test_models.AbstractUserTestCase)\", \"test_user_double_save (auth_tests.test_models.AbstractUserTestCase)\", \"test_create_super_user_raises_error_on_false_is_superuser (auth_tests.test_models.UserManagerTestCase)\", \"test_create_superuser_raises_error_on_false_is_staff (auth_tests.test_models.UserManagerTestCase)\", \"test_create_user (auth_tests.test_models.UserManagerTestCase)\", \"test_create_user_email_domain_normalize (auth_tests.test_models.UserManagerTestCase)\", \"test_create_user_email_domain_normalize_rfc3696 (auth_tests.test_models.UserManagerTestCase)\", \"test_create_user_email_domain_normalize_with_whitespace (auth_tests.test_models.UserManagerTestCase)\", \"test_create_user_is_staff (auth_tests.test_models.UserManagerTestCase)\", \"test_empty_username (auth_tests.test_models.UserManagerTestCase)\", \"test_make_random_password (auth_tests.test_models.UserManagerTestCase)\", \"test_runpython_manager_methods (auth_tests.test_models.UserManagerTestCase)\"]", + "expected_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + }, + "test_file_spans": { + "tests/auth_tests/models/__init__.py": [ + "imports" + ], + "tests/auth_tests/models/with_custom_email_field.py": [ + "CustomEmailField" + ], + "tests/auth_tests/test_models.py": [ + "imports" + ], + "tests/auth_tests/test_tokens.py": [ + "imports", + "TokenGeneratorTest.test_timeout" + ] + }, + "resolved_by": [ + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + }, + "alternative_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value", + "PasswordResetTokenGenerator" + ] + }, + "alternative_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value", + "PasswordResetTokenGenerator" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + }, + "alternative_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + }, + "alternative_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { 
+ "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + }, + "alternative_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + }, + "alternative_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ], + "tests/auth_tests/test_tokens.py": [] + }, + "alternative_spans": { + "django/contrib/auth/tokens.py": [ + "PasswordResetTokenGenerator._make_hash_value" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-20322", + "repo": "sympy/sympy", + "base_commit": "ab864967e71c950a15771bb6c3723636026ba876", + "problem_statement": "Inconsistent behavior for sympify/simplify with ceiling\nIn sympy v1.5.1:\r\n```python\r\nIn [16]: sympy.sympify('4*ceiling(x/4 - 3/4)', evaluate=False).simplify()\r\nOut[16]: 4*ceiling(x/4 - 3/4)\r\n\r\nIn [17]: sympy.sympify('4*ceiling(x/4 - 3/4)', evaluate=True).simplify()\r\nOut[17]: 4*ceiling(x/4 - 3/4)\r\n```\r\n\r\nIn sympy v.1.6.2:\r\n```python\r\nIn [16]: sympy.sympify('4*ceiling(x/4 - 3/4)', evaluate=False).simplify()\r\nOut[16]: 4*ceiling(x/4) - 3\r\n\r\nIn [17]: sympy.sympify('4*ceiling(x/4 - 3/4)', evaluate=True).simplify()\r\nOut [17]: 4*ceiling(x/4 - 3/4)\r\n```\r\n\r\nIs there a way to ensure that the behavior is consistent, even though evaluate is equal to `False` when parsing?\n", + "golden_patch": "diff --git a/sympy/core/mul.py b/sympy/core/mul.py\n--- a/sympy/core/mul.py\n+++ b/sympy/core/mul.py\n@@ -7,7 +7,7 @@\n from .singleton import S\n from .operations import AssocOp, AssocOpDispatcher\n from .cache import cacheit\n-from .logic import fuzzy_not, _fuzzy_group, fuzzy_and\n+from .logic import fuzzy_not, _fuzzy_group\n from .compatibility import reduce\n from .expr import Expr\n from .parameters import global_parameters\n@@ -1262,27 +1262,47 @@ def _eval_is_zero(self):\n zero = None\n return zero\n \n+ # without involving odd/even checks this code would suffice:\n+ #_eval_is_integer = lambda self: _fuzzy_group(\n+ # (a.is_integer for a in self.args), quick_exit=True)\n def _eval_is_integer(self):\n- from sympy import fraction\n- from sympy.core.numbers import Float\n-\n is_rational = self._eval_is_rational()\n if is_rational is False:\n return False\n \n- # use exact=True to avoid recomputing num or den\n- n, d = fraction(self, exact=True)\n- if is_rational:\n- if d is S.One:\n- return True\n- if d.is_even:\n- if d.is_prime: # literal or symbolic 2\n- return n.is_even\n- if n.is_odd:\n- return False # true even if d = 0\n- if n == d:\n- return fuzzy_and([not bool(self.atoms(Float)),\n- fuzzy_not(d.is_zero)])\n+ numerators = []\n+ denominators = []\n+ for a in self.args:\n+ if a.is_integer:\n+ numerators.append(a)\n+ elif a.is_Rational:\n+ n, d = a.as_numer_denom()\n+ numerators.append(n)\n+ denominators.append(d)\n+ elif a.is_Pow:\n+ b, e = a.as_base_exp()\n+ if not b.is_integer or not e.is_integer: return\n+ if e.is_negative:\n+ denominators.append(b)\n+ else:\n+ # for integer b and positive integer e: a = b**e would be integer\n+ assert not e.is_positive\n+ # for self being rational and e equal to zero: a = b**e would be 1\n+ assert not e.is_zero\n+ return # 
sign of e unknown -> self.is_integer cannot be decided\n+ else:\n+ return\n+\n+ if not denominators:\n+ return True\n+\n+ odd = lambda ints: all(i.is_odd for i in ints)\n+ even = lambda ints: any(i.is_even for i in ints)\n+\n+ if odd(numerators) and even(denominators):\n+ return False\n+ elif even(numerators) and denominators == [2]:\n+ return True\n \n def _eval_is_polar(self):\n has_polar = any(arg.is_polar for arg in self.args)\n", + "test_patch": "diff --git a/sympy/core/tests/test_arit.py b/sympy/core/tests/test_arit.py\n--- a/sympy/core/tests/test_arit.py\n+++ b/sympy/core/tests/test_arit.py\n@@ -374,12 +374,10 @@ def test_Mul_doesnt_expand_exp():\n assert (x**(-log(5)/log(3))*x)/(x*x**( - log(5)/log(3))) == sympify(1)\n \n def test_Mul_is_integer():\n-\n k = Symbol('k', integer=True)\n n = Symbol('n', integer=True)\n nr = Symbol('nr', rational=False)\n nz = Symbol('nz', integer=True, zero=False)\n- nze = Symbol('nze', even=True, zero=False)\n e = Symbol('e', even=True)\n o = Symbol('o', odd=True)\n i2 = Symbol('2', prime=True, even=True)\n@@ -388,18 +386,31 @@ def test_Mul_is_integer():\n assert (nz/3).is_integer is None\n assert (nr/3).is_integer is False\n assert (x*k*n).is_integer is None\n+ assert (e/2).is_integer is True\n+ assert (e**2/2).is_integer is True\n+ assert (2/k).is_integer is None\n+ assert (2/k**2).is_integer is None\n+ assert ((-1)**k*n).is_integer is True\n+ assert (3*k*e/2).is_integer is True\n+ assert (2*k*e/3).is_integer is None\n assert (e/o).is_integer is None\n assert (o/e).is_integer is False\n assert (o/i2).is_integer is False\n- assert Mul(o, 1/o, evaluate=False).is_integer is True\n assert Mul(k, 1/k, evaluate=False).is_integer is None\n- assert Mul(nze, 1/nze, evaluate=False).is_integer is True\n- assert Mul(2., S.Half, evaluate=False).is_integer is False\n+ assert Mul(2., S.Half, evaluate=False).is_integer is None\n+ assert (2*sqrt(k)).is_integer is None\n+ assert (2*k**n).is_integer is None\n \n s = 2**2**2**Pow(2, 1000, evaluate=False)\n m = Mul(s, s, evaluate=False)\n assert m.is_integer\n \n+ # broken in 1.6 and before, see #20161\n+ xq = Symbol('xq', rational=True)\n+ yq = Symbol('yq', rational=True)\n+ assert (xq*yq).is_integer is None\n+ e_20161 = Mul(-1,Mul(1,Pow(2,-1,evaluate=False),evaluate=False),evaluate=False)\n+ assert e_20161.is_integer is not True # expand(e_20161) -> -1/2, but no need to see that in the assumption without evaluation\n \n def test_Add_Mul_is_integer():\n x = Symbol('x')\n", + "fail_to_pass": "[\"test_Mul_is_integer\"]", + "pass_to_pass": "[\"test_bug1\", \"test_Symbol\", \"test_arit0\", \"test_div\", \"test_pow\", \"test_pow2\", \"test_pow3\", \"test_mod_pow\", \"test_pow_E\", \"test_pow_issue_3516\", \"test_pow_im\", \"test_real_mul\", \"test_ncmul\", \"test_mul_add_identity\", \"test_ncpow\", \"test_powerbug\", \"test_Mul_doesnt_expand_exp\", \"test_Add_Mul_is_integer\", \"test_Add_Mul_is_finite\", \"test_Mul_is_even_odd\", \"test_evenness_in_ternary_integer_product_with_even\", \"test_oddness_in_ternary_integer_product_with_even\", \"test_Mul_is_rational\", \"test_Add_is_rational\", \"test_Add_is_even_odd\", \"test_Mul_is_negative_positive\", \"test_Mul_is_negative_positive_2\", \"test_Mul_is_nonpositive_nonnegative\", \"test_Add_is_negative_positive\", \"test_Add_is_nonpositive_nonnegative\", \"test_Pow_is_integer\", \"test_Pow_is_real\", \"test_real_Pow\", \"test_Pow_is_finite\", \"test_Pow_is_even_odd\", \"test_Pow_is_negative_positive\", \"test_Pow_is_zero\", \"test_Pow_is_nonpositive_nonnegative\", 
\"test_Mul_is_imaginary_real\", \"test_Mul_hermitian_antihermitian\", \"test_Add_is_comparable\", \"test_Mul_is_comparable\", \"test_Pow_is_comparable\", \"test_Add_is_positive_2\", \"test_Add_is_irrational\", \"test_Mul_is_irrational\", \"test_issue_3531\", \"test_issue_3531b\", \"test_bug3\", \"test_suppressed_evaluation\", \"test_AssocOp_doit\", \"test_Add_Mul_Expr_args\", \"test_Add_as_coeff_mul\", \"test_Pow_as_coeff_mul_doesnt_expand\", \"test_issue_3514_18626\", \"test_make_args\", \"test_issue_5126\", \"test_Rational_as_content_primitive\", \"test_Add_as_content_primitive\", \"test_Mul_as_content_primitive\", \"test_Pow_as_content_primitive\", \"test_issue_5460\", \"test_product_irrational\", \"test_issue_5919\", \"test_Mod\", \"test_Mod_Pow\", \"test_Mod_is_integer\", \"test_Mod_is_nonposneg\", \"test_issue_6001\", \"test_polar\", \"test_issue_6040\", \"test_issue_6082\", \"test_issue_6077\", \"test_mul_flatten_oo\", \"test_add_flatten\", \"test_issue_5160_6087_6089_6090\", \"test_float_int_round\", \"test_issue_6611a\", \"test_denest_add_mul\", \"test_mul_coeff\", \"test_mul_zero_detection\", \"test_Mul_with_zero_infinite\", \"test_Mul_does_not_cancel_infinities\", \"test_Mul_does_not_distribute_infinity\", \"test_issue_8247_8354\", \"test_Add_is_zero\", \"test_issue_14392\", \"test_divmod\", \"test__neg__\"]", + "expected_spans": { + "sympy/core/mul.py": [ + "imports", + "Mul._eval_is_integer" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_arit.py": [ + "test_Mul_is_integer" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13590", + "repo": "django/django", + "base_commit": "755dbf39fcdc491fe9b588358303e259c7750be4", + "problem_statement": "Upgrading 2.2>3.0 causes named tuples used as arguments to __range to error.\nDescription\n\t\nI noticed this while upgrading a project from 2.2 to 3.0.\nThis project passes named 2-tuples as arguments to range queryset filters. This works fine on 2.2. 
On 3.0 it causes the following error: TypeError: __new__() missing 1 required positional argument: 'far'.\nThis happens because django.db.models.sql.query.Query.resolve_lookup_value goes into the tuple elements to resolve lookups and then attempts to reconstitute the tuple with the resolved elements.\nWhen it attempts to construct the new tuple it preserves the type (the named tuple) but it passes a iterator to it's constructor.\nNamedTuples don't have the code path for copying an iterator, and so it errors on insufficient arguments.\nThe fix is to * expand the contents of the iterator into the constructor.\n", + "golden_patch": "diff --git a/django/db/models/sql/query.py b/django/db/models/sql/query.py\n--- a/django/db/models/sql/query.py\n+++ b/django/db/models/sql/query.py\n@@ -1077,10 +1077,14 @@ def resolve_lookup_value(self, value, can_reuse, allow_joins):\n elif isinstance(value, (list, tuple)):\n # The items of the iterable may be expressions and therefore need\n # to be resolved independently.\n- return type(value)(\n+ values = (\n self.resolve_lookup_value(sub_value, can_reuse, allow_joins)\n for sub_value in value\n )\n+ type_ = type(value)\n+ if hasattr(type_, '_make'): # namedtuple\n+ return type_(*values)\n+ return type_(values)\n return value\n \n def solve_lookup_type(self, lookup):\n", + "test_patch": "diff --git a/tests/expressions/tests.py b/tests/expressions/tests.py\n--- a/tests/expressions/tests.py\n+++ b/tests/expressions/tests.py\n@@ -2,6 +2,7 @@\n import pickle\n import unittest\n import uuid\n+from collections import namedtuple\n from copy import deepcopy\n from decimal import Decimal\n from unittest import mock\n@@ -813,7 +814,7 @@ def setUpTestData(cls):\n Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo)\n Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo)\n Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo)\n- Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo)\n+ cls.c5 = Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo)\n \n def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self):\n # __in lookups can use F() expressions for integers.\n@@ -884,6 +885,13 @@ def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self):\n ordered=False\n )\n \n+ def test_range_lookup_namedtuple(self):\n+ EmployeeRange = namedtuple('EmployeeRange', ['minimum', 'maximum'])\n+ qs = Company.objects.filter(\n+ num_employees__range=EmployeeRange(minimum=51, maximum=100),\n+ )\n+ self.assertSequenceEqual(qs, [self.c5])\n+\n @unittest.skipUnless(connection.vendor == 'sqlite',\n \"This defensive test only works on databases that don't validate parameter types\")\n def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self):\n", + "fail_to_pass": "[\"test_range_lookup_namedtuple (expressions.tests.IterableLookupInnerExpressionsTests)\"]", + "pass_to_pass": "[\"test_empty_group_by (expressions.tests.ExpressionWrapperTests)\", \"test_non_empty_group_by (expressions.tests.ExpressionWrapperTests)\", \"test_deconstruct (expressions.tests.FTests)\", \"test_deepcopy (expressions.tests.FTests)\", \"test_equal (expressions.tests.FTests)\", \"test_hash (expressions.tests.FTests)\", \"test_not_equal_Value (expressions.tests.FTests)\", \"test_optimizations (expressions.tests.ExistsTests)\", \"test_and (expressions.tests.CombinableTests)\", \"test_negation 
(expressions.tests.CombinableTests)\", \"test_or (expressions.tests.CombinableTests)\", \"test_reversed_and (expressions.tests.CombinableTests)\", \"test_reversed_or (expressions.tests.CombinableTests)\", \"test_aggregates (expressions.tests.ReprTests)\", \"test_distinct_aggregates (expressions.tests.ReprTests)\", \"test_expressions (expressions.tests.ReprTests)\", \"test_filtered_aggregates (expressions.tests.ReprTests)\", \"test_functions (expressions.tests.ReprTests)\", \"test_resolve_output_field (expressions.tests.CombinedExpressionTests)\", \"test_month_aggregation (expressions.tests.FieldTransformTests)\", \"test_multiple_transforms_in_values (expressions.tests.FieldTransformTests)\", \"test_transform_in_values (expressions.tests.FieldTransformTests)\", \"test_equal (expressions.tests.SimpleExpressionTests)\", \"test_hash (expressions.tests.SimpleExpressionTests)\", \"test_F_reuse (expressions.tests.ExpressionsTests)\", \"test_insensitive_patterns_escape (expressions.tests.ExpressionsTests)\", \"test_patterns_escape (expressions.tests.ExpressionsTests)\", \"test_complex_expressions (expressions.tests.ExpressionsNumericTests)\", \"test_fill_with_value_from_same_object (expressions.tests.ExpressionsNumericTests)\", \"test_filter_not_equals_other_field (expressions.tests.ExpressionsNumericTests)\", \"test_increment_value (expressions.tests.ExpressionsNumericTests)\", \"test_compile_unresolved (expressions.tests.ValueTests)\", \"test_deconstruct (expressions.tests.ValueTests)\", \"test_deconstruct_output_field (expressions.tests.ValueTests)\", \"test_equal (expressions.tests.ValueTests)\", \"test_equal_output_field (expressions.tests.ValueTests)\", \"test_hash (expressions.tests.ValueTests)\", \"test_raise_empty_expressionlist (expressions.tests.ValueTests)\", \"test_resolve_output_field (expressions.tests.ValueTests)\", \"test_resolve_output_field_failure (expressions.tests.ValueTests)\", \"test_update_TimeField_using_Value (expressions.tests.ValueTests)\", \"test_update_UUIDField_using_Value (expressions.tests.ValueTests)\", \"test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_expressions_in_lookups_join_choice (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_in_lookup_allows_F_expressions_and_expressions_for_datetimes (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_in_lookup_allows_F_expressions_and_expressions_for_integers (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_range_lookup_allows_F_expressions_and_expressions_for_integers (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_lefthand_addition (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_and (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_left_shift_operator (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_or (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_right_shift_operator (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_xor (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_xor_null (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_division (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_modulo (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_multiplication (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_power (expressions.tests.ExpressionOperatorTests)\", 
\"test_lefthand_subtraction (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_addition (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_division (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_modulo (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_multiplication (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_subtraction (expressions.tests.ExpressionOperatorTests)\", \"test_righthand_power (expressions.tests.ExpressionOperatorTests)\", \"test_date_case_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_date_comparison (expressions.tests.FTimeDeltaTests)\", \"test_date_minus_duration (expressions.tests.FTimeDeltaTests)\", \"test_date_subquery_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_date_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_datetime_subquery_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_datetime_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_datetime_subtraction_microseconds (expressions.tests.FTimeDeltaTests)\", \"test_delta_add (expressions.tests.FTimeDeltaTests)\", \"test_delta_subtract (expressions.tests.FTimeDeltaTests)\", \"test_delta_update (expressions.tests.FTimeDeltaTests)\", \"test_duration_expressions (expressions.tests.FTimeDeltaTests)\", \"test_duration_with_datetime (expressions.tests.FTimeDeltaTests)\", \"test_duration_with_datetime_microseconds (expressions.tests.FTimeDeltaTests)\", \"test_durationfield_add (expressions.tests.FTimeDeltaTests)\", \"test_exclude (expressions.tests.FTimeDeltaTests)\", \"test_invalid_operator (expressions.tests.FTimeDeltaTests)\", \"test_mixed_comparisons2 (expressions.tests.FTimeDeltaTests)\", \"test_multiple_query_compilation (expressions.tests.FTimeDeltaTests)\", \"test_negative_timedelta_update (expressions.tests.FTimeDeltaTests)\", \"test_query_clone (expressions.tests.FTimeDeltaTests)\", \"test_time_subquery_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_time_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_aggregate_subquery_annotation (expressions.tests.BasicExpressionsTests)\", \"test_annotate_values_aggregate (expressions.tests.BasicExpressionsTests)\", \"test_annotate_values_count (expressions.tests.BasicExpressionsTests)\", \"test_annotate_values_filter (expressions.tests.BasicExpressionsTests)\", \"test_annotation_with_nested_outerref (expressions.tests.BasicExpressionsTests)\", \"test_annotation_with_outerref (expressions.tests.BasicExpressionsTests)\", \"test_annotations_within_subquery (expressions.tests.BasicExpressionsTests)\", \"test_arithmetic (expressions.tests.BasicExpressionsTests)\", \"test_case_in_filter_if_boolean_output_field (expressions.tests.BasicExpressionsTests)\", \"test_exist_single_field_output_field (expressions.tests.BasicExpressionsTests)\", \"test_exists_in_filter (expressions.tests.BasicExpressionsTests)\", \"test_explicit_output_field (expressions.tests.BasicExpressionsTests)\", \"test_filter_inter_attribute (expressions.tests.BasicExpressionsTests)\", \"test_filter_with_join (expressions.tests.BasicExpressionsTests)\", \"test_filtering_on_annotate_that_uses_q (expressions.tests.BasicExpressionsTests)\", \"test_filtering_on_q_that_is_boolean (expressions.tests.BasicExpressionsTests)\", \"test_filtering_on_rawsql_that_is_boolean (expressions.tests.BasicExpressionsTests)\", \"test_in_subquery (expressions.tests.BasicExpressionsTests)\", \"test_incorrect_field_in_F_expression (expressions.tests.BasicExpressionsTests)\", 
\"test_incorrect_joined_field_in_F_expression (expressions.tests.BasicExpressionsTests)\", \"test_nested_outerref_with_function (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery_join_outer_ref (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery_outer_ref_2 (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery_outer_ref_with_autofield (expressions.tests.BasicExpressionsTests)\", \"test_new_object_create (expressions.tests.BasicExpressionsTests)\", \"test_new_object_save (expressions.tests.BasicExpressionsTests)\", \"test_object_create_with_aggregate (expressions.tests.BasicExpressionsTests)\", \"test_object_update (expressions.tests.BasicExpressionsTests)\", \"test_object_update_fk (expressions.tests.BasicExpressionsTests)\", \"test_object_update_unsaved_objects (expressions.tests.BasicExpressionsTests)\", \"test_order_by_exists (expressions.tests.BasicExpressionsTests)\", \"test_order_by_multiline_sql (expressions.tests.BasicExpressionsTests)\", \"test_order_of_operations (expressions.tests.BasicExpressionsTests)\", \"test_outerref (expressions.tests.BasicExpressionsTests)\", \"test_outerref_mixed_case_table_name (expressions.tests.BasicExpressionsTests)\", \"test_outerref_with_operator (expressions.tests.BasicExpressionsTests)\", \"test_parenthesis_priority (expressions.tests.BasicExpressionsTests)\", \"test_pickle_expression (expressions.tests.BasicExpressionsTests)\", \"test_subquery (expressions.tests.BasicExpressionsTests)\", \"test_subquery_eq (expressions.tests.BasicExpressionsTests)\", \"test_subquery_filter_by_aggregate (expressions.tests.BasicExpressionsTests)\", \"test_subquery_filter_by_lazy (expressions.tests.BasicExpressionsTests)\", \"test_subquery_group_by_outerref_in_filter (expressions.tests.BasicExpressionsTests)\", \"test_subquery_in_filter (expressions.tests.BasicExpressionsTests)\", \"test_subquery_references_joined_table_twice (expressions.tests.BasicExpressionsTests)\", \"test_ticket_11722_iexact_lookup (expressions.tests.BasicExpressionsTests)\", \"test_ticket_16731_startswith_lookup (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_chained_filters (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_join_reuse (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_kwarg_ordering (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_kwarg_ordering_2 (expressions.tests.BasicExpressionsTests)\", \"test_update (expressions.tests.BasicExpressionsTests)\", \"test_update_inherited_field_value (expressions.tests.BasicExpressionsTests)\", \"test_update_with_fk (expressions.tests.BasicExpressionsTests)\", \"test_update_with_none (expressions.tests.BasicExpressionsTests)\", \"test_uuid_pk_subquery (expressions.tests.BasicExpressionsTests)\"]", + "expected_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "test_file_spans": { + "tests/expressions/tests.py": [ + "imports", + "IterableLookupInnerExpressionsTests.setUpTestData", + "IterableLookupInnerExpressionsTests.test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/models/sql/query.py": [ + "imports", + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "imports", + "Query.resolve_lookup_value" + ] + } + }, + { + "name": 
"20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/db/models/sql/query.py": [ + "imports", + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "imports", + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/models/sql/query.py": [ + "imports", + "get_field_names_from_opts", + "impl:3", + "RawQuery.__init__", + "RawQuery.get_columns", + "RawQuery.__iter__", + "RawQuery.__repr__", + "RawQuery._execute_query", + "Query", + "Query.__init__", + "Query.has_select_fields", + "Query.get_compiler", + "Query.clone", + "Query.rewrite_cols", + "Query.get_aggregation", + "Query.exists", + "Query.combine", + "Query.deferred_to_data", + "Query.table_alias", + "Query.promote_joins", + "Query.change_aliases", + "Query.bump_prefix", + "Query.count_active_tables", + "Query.join", + "Query.join_parent_model", + "Query.add_annotation", + "Query.resolve_expression", + "Query.get_external_cols", + "Query.resolve_lookup_value", + "Query.solve_lookup_type", + "Query.check_query_object_type", + "Query.check_related_objects", + "Query.check_filterable", + "Query.build_lookup", + "Query.try_transform", + "Query.build_filter", + "Query.add_q", + "Query._add_q", + "Query.build_filtered_relation_q", + "Query.add_filtered_relation", + "Query.names_to_path", + "Query.setup_joins", + "Query.trim_joins", + "Query.resolve_ref", + "Query.split_exclude", + 
"Query.set_limits", + "Query.has_limit_one", + "Query.add_fields", + "Query.add_ordering", + "Query.set_group_by", + "Query.add_extra", + "Query.clear_deferred_loading", + "Query.add_deferred_loading", + "Query.add_immediate_loading", + "Query.get_loaded_field_names", + "Query.set_values", + "Query.annotation_select", + "Query.extra_select", + "Query.trim_start", + "Query.is_nullable", + "JoinPromoter.__init__", + "JoinPromoter.update_join_types" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "imports", + "get_field_names_from_opts", + "impl:3", + "RawQuery.__init__", + "RawQuery.get_columns", + "RawQuery.__iter__", + "RawQuery.__repr__", + "RawQuery._execute_query", + "Query", + "Query.__init__", + "Query.has_select_fields", + "Query.get_compiler", + "Query.clone", + "Query.rewrite_cols", + "Query.get_aggregation", + "Query.exists", + "Query.combine", + "Query.deferred_to_data", + "Query.table_alias", + "Query.promote_joins", + "Query.change_aliases", + "Query.bump_prefix", + "Query.count_active_tables", + "Query.join", + "Query.join_parent_model", + "Query.add_annotation", + "Query.resolve_expression", + "Query.get_external_cols", + "Query.resolve_lookup_value", + "Query.solve_lookup_type", + "Query.check_query_object_type", + "Query.check_related_objects", + "Query.check_filterable", + "Query.build_lookup", + "Query.try_transform", + "Query.build_filter", + "Query.add_q", + "Query._add_q", + "Query.build_filtered_relation_q", + "Query.add_filtered_relation", + "Query.names_to_path", + "Query.setup_joins", + "Query.trim_joins", + "Query.resolve_ref", + "Query.split_exclude", + "Query.set_limits", + "Query.has_limit_one", + "Query.add_fields", + "Query.add_ordering", + "Query.set_group_by", + "Query.add_extra", + "Query.clear_deferred_loading", + "Query.add_deferred_loading", + "Query.add_immediate_loading", + "Query.get_loaded_field_names", + "Query.set_values", + "Query.annotation_select", + "Query.extra_select", + "Query.trim_start", + "Query.is_nullable", + "JoinPromoter.__init__", + "JoinPromoter.update_join_types" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + 
"name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + }, + "alternative_spans": { + "django/db/models/sql/query.py": [ + "Query.resolve_lookup_value" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "matplotlib__matplotlib-18869", + "repo": "matplotlib/matplotlib", + "base_commit": "b7d05919865fc0c37a0164cf467d5d5513bd0ede", + "problem_statement": "Add easily comparable version info to toplevel\n\r\n\r\n### Problem\r\n\r\nCurrently matplotlib only exposes `__version__`. For quick version checks, exposing either a `version_info` tuple (which can be compared with other tuples) or a `LooseVersion` instance (which can be properly compared with other strings) would be a small usability improvement.\r\n\r\n(In practice I guess boring string comparisons will work just fine until we hit mpl 3.10 or 4.10 which is unlikely to happen soon, but that feels quite dirty :))\r\n\r\n\r\n### Proposed Solution\r\n\r\nI guess I slightly prefer `LooseVersion`, but exposing just a `version_info` tuple is much more common in other packages (and perhaps simpler to understand). The hardest(?) 
part is probably just bikeshedding this point :-)\r\n\r\n\r\n### Additional context and prior art\r\n\r\n`version_info` is a pretty common thing (citation needed).\r\n\r\n\n", + "golden_patch": "diff --git a/lib/matplotlib/__init__.py b/lib/matplotlib/__init__.py\n--- a/lib/matplotlib/__init__.py\n+++ b/lib/matplotlib/__init__.py\n@@ -129,25 +129,60 @@\n year = 2007\n }\"\"\"\n \n+# modelled after sys.version_info\n+_VersionInfo = namedtuple('_VersionInfo',\n+ 'major, minor, micro, releaselevel, serial')\n \n-def __getattr__(name):\n- if name == \"__version__\":\n+\n+def _parse_to_version_info(version_str):\n+ \"\"\"\n+ Parse a version string to a namedtuple analogous to sys.version_info.\n+\n+ See:\n+ https://packaging.pypa.io/en/latest/version.html#packaging.version.parse\n+ https://docs.python.org/3/library/sys.html#sys.version_info\n+ \"\"\"\n+ v = parse_version(version_str)\n+ if v.pre is None and v.post is None and v.dev is None:\n+ return _VersionInfo(v.major, v.minor, v.micro, 'final', 0)\n+ elif v.dev is not None:\n+ return _VersionInfo(v.major, v.minor, v.micro, 'alpha', v.dev)\n+ elif v.pre is not None:\n+ releaselevel = {\n+ 'a': 'alpha',\n+ 'b': 'beta',\n+ 'rc': 'candidate'}.get(v.pre[0], 'alpha')\n+ return _VersionInfo(v.major, v.minor, v.micro, releaselevel, v.pre[1])\n+ else:\n+ # fallback for v.post: guess-next-dev scheme from setuptools_scm\n+ return _VersionInfo(v.major, v.minor, v.micro + 1, 'alpha', v.post)\n+\n+\n+def _get_version():\n+ \"\"\"Return the version string used for __version__.\"\"\"\n+ # Only shell out to a git subprocess if really needed, and not on a\n+ # shallow clone, such as those used by CI, as the latter would trigger\n+ # a warning from setuptools_scm.\n+ root = Path(__file__).resolve().parents[2]\n+ if (root / \".git\").exists() and not (root / \".git/shallow\").exists():\n import setuptools_scm\n+ return setuptools_scm.get_version(\n+ root=root,\n+ version_scheme=\"post-release\",\n+ local_scheme=\"node-and-date\",\n+ fallback_version=_version.version,\n+ )\n+ else: # Get the version from the _version.py setuptools_scm file.\n+ return _version.version\n+\n+\n+def __getattr__(name):\n+ if name in (\"__version__\", \"__version_info__\"):\n global __version__ # cache it.\n- # Only shell out to a git subprocess if really needed, and not on a\n- # shallow clone, such as those used by CI, as the latter would trigger\n- # a warning from setuptools_scm.\n- root = Path(__file__).resolve().parents[2]\n- if (root / \".git\").exists() and not (root / \".git/shallow\").exists():\n- __version__ = setuptools_scm.get_version(\n- root=root,\n- version_scheme=\"post-release\",\n- local_scheme=\"node-and-date\",\n- fallback_version=_version.version,\n- )\n- else: # Get the version from the _version.py setuptools_scm file.\n- __version__ = _version.version\n- return __version__\n+ __version__ = _get_version()\n+ global __version__info__ # cache it.\n+ __version_info__ = _parse_to_version_info(__version__)\n+ return __version__ if name == \"__version__\" else __version_info__\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n \n \n", + "test_patch": "diff --git a/lib/matplotlib/tests/test_matplotlib.py b/lib/matplotlib/tests/test_matplotlib.py\n--- a/lib/matplotlib/tests/test_matplotlib.py\n+++ b/lib/matplotlib/tests/test_matplotlib.py\n@@ -7,6 +7,16 @@\n import matplotlib\n \n \n+@pytest.mark.parametrize('version_str, version_tuple', [\n+ ('3.5.0', (3, 5, 0, 'final', 0)),\n+ ('3.5.0rc2', (3, 5, 0, 'candidate', 2)),\n+ 
('3.5.0.dev820+g6768ef8c4c', (3, 5, 0, 'alpha', 820)),\n+ ('3.5.0.post820+g6768ef8c4c', (3, 5, 1, 'alpha', 820)),\n+])\n+def test_parse_to_version_info(version_str, version_tuple):\n+ assert matplotlib._parse_to_version_info(version_str) == version_tuple\n+\n+\n @pytest.mark.skipif(\n os.name == \"nt\", reason=\"chmod() doesn't work as is on Windows\")\n @pytest.mark.skipif(os.name != \"nt\" and os.geteuid() == 0,\n", + "fail_to_pass": "[\"lib/matplotlib/tests/test_matplotlib.py::test_parse_to_version_info[3.5.0-version_tuple0]\", \"lib/matplotlib/tests/test_matplotlib.py::test_parse_to_version_info[3.5.0rc2-version_tuple1]\", \"lib/matplotlib/tests/test_matplotlib.py::test_parse_to_version_info[3.5.0.dev820+g6768ef8c4c-version_tuple2]\", \"lib/matplotlib/tests/test_matplotlib.py::test_parse_to_version_info[3.5.0.post820+g6768ef8c4c-version_tuple3]\"]", + "pass_to_pass": "[\"lib/matplotlib/tests/test_matplotlib.py::test_importable_with_no_home\", \"lib/matplotlib/tests/test_matplotlib.py::test_use_doc_standard_backends\", \"lib/matplotlib/tests/test_matplotlib.py::test_importable_with__OO\"]", + "expected_spans": { + "lib/matplotlib/__init__.py": [ + "__getattr__" + ] + }, + "test_file_spans": { + "lib/matplotlib/tests/test_matplotlib.py": [ + "test_tmpconfigdir_warning" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13658", + "repo": "django/django", + "base_commit": "0773837e15bb632afffb6848a58c59a791008fa1", + "problem_statement": "ManagementUtility instantiates CommandParser without passing already-computed prog argument\nDescription\n\t\nManagementUtility \u200bgoes to the trouble to parse the program name from the argv it's passed rather than from sys.argv: \n\tdef __init__(self, argv=None):\n\t\tself.argv = argv or sys.argv[:]\n\t\tself.prog_name = os.path.basename(self.argv[0])\n\t\tif self.prog_name == '__main__.py':\n\t\t\tself.prog_name = 'python -m django'\nBut then when it needs to parse --pythonpath and --settings, it \u200buses the program name from sys.argv: \n\t\tparser = CommandParser(usage='%(prog)s subcommand [options] [args]', add_help=False, allow_abbrev=False)\nAbove \"%(prog)s\" \u200brefers to sys.argv[0]. Instead, it should refer to self.prog_name. This can fixed as follows:\n\t\tparser = CommandParser(\n\t\t\tprog=self.prog_name,\n\t\t\tusage='%(prog)s subcommand [options] [args]',\n\t\t\tadd_help=False,\n\t\t\tallow_abbrev=False)\nI'm aware that execute_from_command_line is a private API, but it'd be really convenient for me if it worked properly in my weird embedded environment where sys.argv[0] is \u200bincorrectly None. 
If passing my own argv to execute_from_command_line avoided all the ensuing exceptions, I wouldn't have to modify sys.argv[0] globally as I'm doing in the meantime.\n", + "golden_patch": "diff --git a/django/core/management/__init__.py b/django/core/management/__init__.py\n--- a/django/core/management/__init__.py\n+++ b/django/core/management/__init__.py\n@@ -344,7 +344,12 @@ def execute(self):\n # Preprocess options to extract --settings and --pythonpath.\n # These options could affect the commands that are available, so they\n # must be processed early.\n- parser = CommandParser(usage='%(prog)s subcommand [options] [args]', add_help=False, allow_abbrev=False)\n+ parser = CommandParser(\n+ prog=self.prog_name,\n+ usage='%(prog)s subcommand [options] [args]',\n+ add_help=False,\n+ allow_abbrev=False,\n+ )\n parser.add_argument('--settings')\n parser.add_argument('--pythonpath')\n parser.add_argument('args', nargs='*') # catch-all\n", + "test_patch": "diff --git a/tests/admin_scripts/tests.py b/tests/admin_scripts/tests.py\n--- a/tests/admin_scripts/tests.py\n+++ b/tests/admin_scripts/tests.py\n@@ -17,7 +17,7 @@\n from django import conf, get_version\n from django.conf import settings\n from django.core.management import (\n- BaseCommand, CommandError, call_command, color,\n+ BaseCommand, CommandError, call_command, color, execute_from_command_line,\n )\n from django.core.management.commands.loaddata import Command as LoaddataCommand\n from django.core.management.commands.runserver import (\n@@ -31,6 +31,7 @@\n from django.test import (\n LiveServerTestCase, SimpleTestCase, TestCase, override_settings,\n )\n+from django.test.utils import captured_stderr, captured_stdout\n \n custom_templates_dir = os.path.join(os.path.dirname(__file__), 'custom_templates')\n \n@@ -1867,6 +1868,20 @@ def _test(self, args, option_b=\"'2'\"):\n )\n \n \n+class ExecuteFromCommandLine(SimpleTestCase):\n+ def test_program_name_from_argv(self):\n+ \"\"\"\n+ Program name is computed from the execute_from_command_line()'s argv\n+ argument, not sys.argv.\n+ \"\"\"\n+ args = ['help', 'shell']\n+ with captured_stdout() as out, captured_stderr() as err:\n+ with mock.patch('sys.argv', [None] + args):\n+ execute_from_command_line(['django-admin'] + args)\n+ self.assertIn('usage: django-admin shell', out.getvalue())\n+ self.assertEqual(err.getvalue(), '')\n+\n+\n @override_settings(ROOT_URLCONF='admin_scripts.urls')\n class StartProject(LiveServerTestCase, AdminScriptTestCase):\n \n", + "fail_to_pass": "[\"test_program_name_from_argv (admin_scripts.tests.ExecuteFromCommandLine)\"]", + "pass_to_pass": "[\"test_params_to_runserver (admin_scripts.tests.ManageTestserver)\", \"test_testserver_handle_params (admin_scripts.tests.ManageTestserver)\", \"test_no_database (admin_scripts.tests.ManageRunserver)\", \"test_readonly_database (admin_scripts.tests.ManageRunserver)\", \"test_runner_addrport_ipv6 (admin_scripts.tests.ManageRunserver)\", \"test_runner_ambiguous (admin_scripts.tests.ManageRunserver)\", \"test_runner_custom_defaults (admin_scripts.tests.ManageRunserver)\", \"test_runner_custom_defaults_ipv6 (admin_scripts.tests.ManageRunserver)\", \"test_runner_hostname (admin_scripts.tests.ManageRunserver)\", \"test_runner_hostname_ipv6 (admin_scripts.tests.ManageRunserver)\", \"test_runserver_addrport (admin_scripts.tests.ManageRunserver)\", \"test_migration_warning_one_app (admin_scripts.tests.ManageRunserverMigrationWarning)\", \"test_precedence (admin_scripts.tests.Discovery)\", \"test_program_name_in_help 
(admin_scripts.tests.MainModule)\", \"test_non_existent_command_output (admin_scripts.tests.ManageManuallyConfiguredSettings)\", \"Regression for #20509\", \"test_empty_allowed_hosts_error (admin_scripts.tests.ManageRunserverEmptyAllowedHosts)\", \"no settings: manage.py builtin commands fail with an error when no settings provided\", \"no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist\", \"test_attribute_error (admin_scripts.tests.ManageSettingsWithSettingsErrors)\", \"test_help (admin_scripts.tests.ManageSettingsWithSettingsErrors)\", \"test_import_error (admin_scripts.tests.ManageSettingsWithSettingsErrors)\", \"test_key_error (admin_scripts.tests.ManageSettingsWithSettingsErrors)\", \"test_no_suggestions (admin_scripts.tests.DjangoAdminSuggestions)\", \"test_suggestions (admin_scripts.tests.DjangoAdminSuggestions)\", \"no settings: django-admin builtin commands fail with an error when no settings provided\", \"no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"test_commands_with_invalid_settings (admin_scripts.tests.DjangoAdminNoSettings)\", \"Options passed before settings are correctly handled.\", \"Options are correctly handled when they are passed before and after\", \"Options passed after settings are correctly handled.\", \"Short options passed after settings are correctly handled.\", \"Short options passed before settings are correctly handled.\", \"minimal: django-admin builtin commands fail with an error when no settings provided\", \"minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"minimal: django-admin builtin commands fail if settings are provided in the environment\", \"minimal: django-admin builtin commands fail if settings are provided as argument\", \"minimal: django-admin can't execute user commands unless settings are provided\", \"minimal: django-admin can't execute user commands, even if settings are provided in environment\", \"minimal: django-admin can't execute user commands, even if settings are provided as argument\", \"alternate: django-admin builtin commands fail with an error when no settings provided\", \"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"alternate: django-admin builtin commands succeed if settings are provided in the environment\", \"alternate: django-admin builtin commands succeed if settings are provided as argument\", \"alternate: django-admin can't execute user commands unless settings are provided\", \"alternate: django-admin can execute user commands if settings are provided in environment\", \"alternate: django-admin can execute user commands if settings are provided as argument\", \"default: django-admin builtin commands fail with an error when no settings provided\", \"default: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"default: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"default: django-admin builtin commands succeed if settings are provided in the environment\", 
\"default: django-admin builtin commands succeed if settings are provided as argument\", \"default: django-admin can't execute user commands if it isn't provided settings\", \"default: django-admin can execute user commands if settings are provided in environment\", \"default: django-admin can execute user commands if settings are provided as argument\", \"directory: django-admin builtin commands fail with an error when no settings provided\", \"directory: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"directory: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"directory: django-admin builtin commands succeed if settings are provided in the environment\", \"directory: django-admin builtin commands succeed if settings are provided as argument\", \"directory: django-admin can't execute user commands unless settings are provided\", \"directory: startapp creates the correct directory\", \"directory: startapp creates the correct directory with a custom template\", \"test_importable_name (admin_scripts.tests.StartApp)\", \"test_importable_target_name (admin_scripts.tests.StartApp)\", \"startapp validates that app name is a valid Python identifier.\", \"test_invalid_target_name (admin_scripts.tests.StartApp)\", \"test_overlaying_app (admin_scripts.tests.StartApp)\", \"manage.py check does not raise errors when an app imports a base\", \"manage.py check reports an ImportError if an app's models.py\", \"manage.py check does not raise an ImportError validating a\", \"check reports an error on a nonexistent app in INSTALLED_APPS.\", \"All errors/warnings should be sorted by level and by message.\", \"test_warning_does_not_halt (admin_scripts.tests.ManageCheck)\", \"fulldefault: django-admin builtin commands fail with an error when no settings provided\", \"fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"fulldefault: django-admin builtin commands succeed if the environment contains settings\", \"fulldefault: django-admin builtin commands succeed if a settings file is provided\", \"fulldefault: django-admin can't execute user commands unless settings are provided\", \"fulldefault: django-admin can execute user commands if settings are provided in environment\", \"fulldefault: django-admin can execute user commands if settings are provided as argument\", \"Runs without error and emits settings diff.\", \"test_custom_default (admin_scripts.tests.DiffSettings)\", \"test_dynamic_settings_configured (admin_scripts.tests.DiffSettings)\", \"test_settings_configured (admin_scripts.tests.DiffSettings)\", \"--output=unified emits settings diff in unified mode.\", \"default: manage.py builtin commands succeed when default settings are appropriate\", \"default: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"default: manage.py builtin commands succeed if settings file (from argument) doesn't exist\", \"default: manage.py builtin commands succeed if settings are provided in the environment\", \"default: manage.py builtin commands succeed if settings are provided as argument\", \"default: manage.py can execute user commands when default settings are appropriate\", \"default: manage.py can execute user commands when settings are provided in environment\", \"default: manage.py can execute user commands when settings are provided as argument\", 
\"alternate: manage.py builtin commands fail with an error when no default settings provided\", \"alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist\", \"alternate: manage.py builtin commands work if settings are provided in the environment\", \"alternate: manage.py builtin commands work with settings provided as argument\", \"alternate: manage.py can't execute user commands without settings\", \"alternate: manage.py output syntax color can be deactivated with the `--no-color` option\", \"alternate: manage.py can execute user commands if settings are provided in environment\", \"alternate: manage.py can execute user commands if settings are provided as argument\", \"minimal: manage.py builtin commands fail with an error when no settings provided\", \"minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist\", \"minimal: manage.py builtin commands fail if settings are provided in the environment\", \"minimal: manage.py builtin commands fail if settings are provided as argument\", \"minimal: manage.py can't execute user commands without appropriate settings\", \"minimal: manage.py can't execute user commands, even if settings are provided in environment\", \"minimal: manage.py can't execute user commands, even if settings are provided as argument\", \"multiple: manage.py builtin commands fail with an error when no settings provided\", \"multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist\", \"multiple: manage.py can execute builtin commands if settings are provided in the environment\", \"multiple: manage.py builtin commands succeed if settings are provided as argument\", \"multiple: manage.py can't execute user commands using default settings\", \"multiple: manage.py can execute user commands if settings are provided in environment\", \"multiple: manage.py can execute user commands if settings are provided as argument\", \"fulldefault: manage.py builtin commands succeed when default settings are appropriate\", \"fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist\", \"fulldefault: manage.py builtin commands succeed if settings are provided in the environment\", \"fulldefault: manage.py builtin commands succeed if settings are provided as argument\", \"fulldefault: manage.py can execute user commands when default settings are appropriate\", \"fulldefault: manage.py can execute user commands when settings are provided in environment\", \"fulldefault: manage.py can execute user commands when settings are provided as argument\", \"test_custom_project_destination_missing (admin_scripts.tests.StartProject)\", \"Make sure the startproject management command is able to use a different project template\", \"Make sure template context variables are rendered with proper values\", \"Make sure the startproject management command is able to use a different project template from a tarball\", \"test_custom_project_template_from_tarball_by_url (admin_scripts.tests.StartProject)\", \"Startproject can use a project template from a tarball and create it in a specified location\", 
\"test_custom_project_template_with_non_ascii_templates (admin_scripts.tests.StartProject)\", \"Make sure the startproject management command is able to render custom files\", \"test_importable_project_name (admin_scripts.tests.StartProject)\", \"Make sure the startproject management command validates a project name\", \"Make sure template context variables are not html escaped\", \"Startproject management command handles project template tar/zip balls from non-canonical urls\", \"Make sure the startproject management command creates a project\", \"Make sure the startproject management command creates a project in a specific directory\", \"Ticket 17475: Template dir passed has a trailing path separator\", \"Make sure passing the wrong kinds of arguments outputs an error and prints usage\", \"User AppCommands can execute when a single app name is provided\", \"User AppCommands raise an error when multiple app names are provided\", \"User AppCommands raise an error when no app name is provided\", \"User AppCommands can execute when some of the provided app names are invalid\", \"User BaseCommands can execute when a label is provided\", \"User BaseCommands can execute when no labels are provided\", \"User BaseCommands can execute with options when a label is provided\", \"User BaseCommands can execute with multiple options when a label is provided\", \"User BaseCommands outputs command usage when wrong option is specified\", \"test_base_run_from_argv (admin_scripts.tests.CommandTypes)\", \"test_color_style (admin_scripts.tests.CommandTypes)\", \"test_command_color (admin_scripts.tests.CommandTypes)\", \"--no-color prevent colorization of the output\", \"test_custom_stderr (admin_scripts.tests.CommandTypes)\", \"test_custom_stdout (admin_scripts.tests.CommandTypes)\", \"test_force_color_command_init (admin_scripts.tests.CommandTypes)\", \"test_force_color_execute (admin_scripts.tests.CommandTypes)\", \"help is handled as a special case\", \"--help is equivalent to help\", \"help --commands shows the list of all available commands\", \"-h is handled as a short form of --help\", \"User LabelCommands can execute when a label is provided\", \"User LabelCommands are executed multiple times if multiple labels are provided\", \"User LabelCommands raise an error if no label is provided\", \"test_no_color_force_color_mutually_exclusive_command_init (admin_scripts.tests.CommandTypes)\", \"test_no_color_force_color_mutually_exclusive_execute (admin_scripts.tests.CommandTypes)\", \"NoArg Commands can be executed\", \"NoArg Commands raise an error if an argument is provided\", \"test_run_from_argv_closes_connections (admin_scripts.tests.CommandTypes)\", \"test_run_from_argv_non_ascii_error (admin_scripts.tests.CommandTypes)\", \"--help can be used on a specific command\", \"version is handled as a special case\", \"--version is equivalent to version\"]", + "expected_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "test_file_spans": { + "tests/admin_scripts/tests.py": [ + "imports", + "StartProject" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + 
"django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20231010_rag_claude2", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/core/management/__init__.py": [ + "imports", + "find_commands", + "get_commands", + "call_command", + "ManagementUtility.__init__", + "ManagementUtility.main_help_text", + "ManagementUtility.fetch_command", + "ManagementUtility.autocomplete", + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "imports", + "find_commands", + "get_commands", + "call_command", + "ManagementUtility.__init__", + "ManagementUtility.main_help_text", + "ManagementUtility.fetch_command", + "ManagementUtility.autocomplete", + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240402_rag_gpt4", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + 
"alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.__init__", + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.__init__", + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.__init__", + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.__init__", + "ManagementUtility.execute" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.__init__", + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.__init__", + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + 
"django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240402_rag_claude3opus", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ], + "tests/user_commands/tests.py": [ + "imports", + "CommandTests.test_call_command_option_parsing", + "CommandTests.test_call_command_option_parsing_non_string_arg", + "CommandTests.test_calling_a_command_with_only_empty_parameter_should_ends_gracefully", + "CommandTests.test_calling_command_with_app_labels_and_parameters_should_be_ok", + "CommandTests.test_calling_command_with_parameters_and_app_labels_at_the_end_should_be_ok", + "CommandTests.test_calling_a_command_with_no_app_labels_and_parameters_should_raise_a_command_error", + "CommandTests.test_output_transaction", + "CommandTests.test_call_command_no_checks", + "CommandTests.test_requires_system_checks_empty", + "CommandTests.test_requires_system_checks_specific", + "CommandTests.test_requires_system_checks_invalid", + "CommandTests.test_check_migrations", + "CommandTests.test_call_command_unrecognized_option", + "CommandTests.test_call_command_with_required_parameters_in_options", + "CommandTests.test_call_command_with_required_parameters_in_mixed_options", + "CommandTests.test_command_add_arguments_after_common_arguments", + "CommandTests.test_mutually_exclusive_group_required_options", + "CommandTests.test_mutually_exclusive_group_required_const_options", + "CommandTests.test_required_list_option", + "CommandTests.test_required_const_options", + "CommandTests.test_subparser", + "CommandTests.test_subparser_dest_args", + "CommandTests.test_subparser_dest_required_args", + "CommandTests.test_subparser_invalid_option", + "CommandTests.test_create_parser_kwargs", + "CommandTests.test_outputwrapper_flush", + "CommandRunTests", + "CommandRunTests.test_script_prefix_set_in_commands", + "CommandRunTests.test_disallowed_abbreviated_options", + "CommandRunTests.test_skip_checks", + "UtilsTests", + 
"UtilsTests.test_no_existent_external_program", + "UtilsTests.test_get_random_secret_key", + "UtilsTests.test_is_ignored_path_true", + "UtilsTests.test_is_ignored_path_false", + "UtilsTests.test_normalize_path_patterns_truncates_wildcard_base", + "DeprecationTests.test_requires_system_checks_warning", + "DeprecationTests.test_requires_system_checks_true", + "DeprecationTests.test_requires_system_checks_false" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + }, + "alternative_spans": { + "django/core/management/__init__.py": [ + "ManagementUtility.execute" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13660", + "repo": "django/django", + "base_commit": "50c3ac6fa9b7c8a94a6d1dc87edf775e3bc4d575", + "problem_statement": "shell command crashes when passing (with -c) the python code with functions.\nDescription\n\t\nThe examples below use Python 3.7 and Django 2.2.16, but I checked that the code is the same on master and works the same in Python 3.8.\nHere's how \u200bpython -c works:\n$ python -c <\n\tmanagement.execute_from_command_line()\n File \"{sys.prefix}/lib/python3.7/site-packages/django/core/management/__init__.py\", line 381, in execute_from_command_line\n\tutility.execute()\n File \"{sys.prefix}/lib/python3.7/site-packages/django/core/management/__init__.py\", line 375, in execute\n\tself.fetch_command(subcommand).run_from_argv(self.argv)\n File \"{sys.prefix}/lib/python3.7/site-packages/django/core/management/base.py\", line 323, in run_from_argv\n\tself.execute(*args, **cmd_options)\n File \"{sys.prefix}/lib/python3.7/site-packages/django/core/management/base.py\", line 364, in execute\n\toutput = self.handle(*args, **options)\n File \"{sys.prefix}/lib/python3.7/site-packages/django/core/management/commands/shell.py\", line 86, in handle\n\texec(options['command'])\n File \"\", line 5, in \n File \"\", line 4, in f\nNameError: name 'django' is not defined\nThe problem is in the \u200busage of \u200bexec:\n\tdef handle(self, **options):\n\t\t# Execute the command and exit.\n\t\tif options['command']:\n\t\t\texec(options['command'])\n\t\t\treturn\n\t\t# Execute stdin if it has anything to read and exit.\n\t\t# Not supported on Windows due to select.select() limitations.\n\t\tif sys.platform != 'win32' and not sys.stdin.isatty() and select.select([sys.stdin], [], [], 0)[0]:\n\t\t\texec(sys.stdin.read())\n\t\t\treturn\nexec should be passed a dictionary containing a minimal set of globals. 
This can be done by just passing a new, empty dictionary as the second argument of exec.\n", + "golden_patch": "diff --git a/django/core/management/commands/shell.py b/django/core/management/commands/shell.py\n--- a/django/core/management/commands/shell.py\n+++ b/django/core/management/commands/shell.py\n@@ -84,13 +84,13 @@ def python(self, options):\n def handle(self, **options):\n # Execute the command and exit.\n if options['command']:\n- exec(options['command'])\n+ exec(options['command'], globals())\n return\n \n # Execute stdin if it has anything to read and exit.\n # Not supported on Windows due to select.select() limitations.\n if sys.platform != 'win32' and not sys.stdin.isatty() and select.select([sys.stdin], [], [], 0)[0]:\n- exec(sys.stdin.read())\n+ exec(sys.stdin.read(), globals())\n return\n \n available_shells = [options['interface']] if options['interface'] else self.shells\n", + "test_patch": "diff --git a/tests/shell/tests.py b/tests/shell/tests.py\n--- a/tests/shell/tests.py\n+++ b/tests/shell/tests.py\n@@ -9,6 +9,13 @@\n \n \n class ShellCommandTestCase(SimpleTestCase):\n+ script_globals = 'print(\"__name__\" in globals())'\n+ script_with_inline_function = (\n+ 'import django\\n'\n+ 'def f():\\n'\n+ ' print(django.__version__)\\n'\n+ 'f()'\n+ )\n \n def test_command_option(self):\n with self.assertLogs('test', 'INFO') as cm:\n@@ -21,6 +28,16 @@ def test_command_option(self):\n )\n self.assertEqual(cm.records[0].getMessage(), __version__)\n \n+ def test_command_option_globals(self):\n+ with captured_stdout() as stdout:\n+ call_command('shell', command=self.script_globals)\n+ self.assertEqual(stdout.getvalue().strip(), 'True')\n+\n+ def test_command_option_inline_function_call(self):\n+ with captured_stdout() as stdout:\n+ call_command('shell', command=self.script_with_inline_function)\n+ self.assertEqual(stdout.getvalue().strip(), __version__)\n+\n @unittest.skipIf(sys.platform == 'win32', \"Windows select() doesn't support file descriptors.\")\n @mock.patch('django.core.management.commands.shell.select')\n def test_stdin_read(self, select):\n@@ -30,6 +47,30 @@ def test_stdin_read(self, select):\n call_command('shell')\n self.assertEqual(stdout.getvalue().strip(), '100')\n \n+ @unittest.skipIf(\n+ sys.platform == 'win32',\n+ \"Windows select() doesn't support file descriptors.\",\n+ )\n+ @mock.patch('django.core.management.commands.shell.select') # [1]\n+ def test_stdin_read_globals(self, select):\n+ with captured_stdin() as stdin, captured_stdout() as stdout:\n+ stdin.write(self.script_globals)\n+ stdin.seek(0)\n+ call_command('shell')\n+ self.assertEqual(stdout.getvalue().strip(), 'True')\n+\n+ @unittest.skipIf(\n+ sys.platform == 'win32',\n+ \"Windows select() doesn't support file descriptors.\",\n+ )\n+ @mock.patch('django.core.management.commands.shell.select') # [1]\n+ def test_stdin_read_inline_function_call(self, select):\n+ with captured_stdin() as stdin, captured_stdout() as stdout:\n+ stdin.write(self.script_with_inline_function)\n+ stdin.seek(0)\n+ call_command('shell')\n+ self.assertEqual(stdout.getvalue().strip(), __version__)\n+\n @mock.patch('django.core.management.commands.shell.select.select') # [1]\n @mock.patch.dict('sys.modules', {'IPython': None})\n def test_shell_with_ipython_not_installed(self, select):\n", + "fail_to_pass": "[\"test_command_option_inline_function_call (shell.tests.ShellCommandTestCase)\", \"test_stdin_read_inline_function_call (shell.tests.ShellCommandTestCase)\"]", + "pass_to_pass": "[\"test_command_option 
(shell.tests.ShellCommandTestCase)\", \"test_command_option_globals (shell.tests.ShellCommandTestCase)\", \"test_shell_with_bpython_not_installed (shell.tests.ShellCommandTestCase)\", \"test_shell_with_ipython_not_installed (shell.tests.ShellCommandTestCase)\", \"test_stdin_read (shell.tests.ShellCommandTestCase)\", \"test_stdin_read_globals (shell.tests.ShellCommandTestCase)\"]", + "expected_spans": { + "django/core/management/commands/shell.py": [ + "Command.handle" + ] + }, + "test_file_spans": { + "tests/shell/tests.py": [ + "ShellCommandTestCase", + "ShellCommandTestCase.test_stdin_read", + "ShellCommandTestCase.test_shell_with_ipython_not_installed" + ] + }, + "resolved_by": [ + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/core/management/commands/shell.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/shell.py": [ + "Command.handle" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/core/management/commands/shell.py": [ + "Command.handle" + ] + }, + "alternative_spans": { + "django/core/management/commands/shell.py": [ + "Command.handle" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sphinx-doc__sphinx-8435", + "repo": "sphinx-doc/sphinx", + "base_commit": "5d8d6275a54f2c5fb72b82383b5712c22d337634", + "problem_statement": "autodoc_type_aliases does not effect to variables and attributes\n**Describe the bug**\r\nautodoc_type_aliases does not effect to variables and attributes\r\n\r\n**To Reproduce**\r\n\r\n```\r\n# example.py\r\nfrom __future__ import annotations\r\n\r\n\r\n#: blah blah blah\r\nvar: String\r\n\r\n\r\nclass MyString:\r\n \"mystring\"\r\n\r\n #: blah blah blah\r\n var: String\r\n```\r\n```\r\n# index.rst\r\n.. 
automodule:: example\r\n :members:\r\n :undoc-members:\r\n```\r\n```\r\n# conf.py\r\nautodoc_type_aliases = {\r\n 'String': 'example.MyString'\r\n}\r\n```\r\n\r\n**Expected behavior**\r\n`autodoc_type_aliases` should be applied to `example.var` and `example.MyString.var`.\r\n\r\n**Your project**\r\nN/A\r\n\r\n**Screenshots**\r\nN/A\r\n\r\n**Environment info**\r\n- OS: Mac\r\n- Python version: 3.9.0\r\n- Sphinx version: HEAD of 3.x branch\r\n- Sphinx extensions: sphinx.ext.autodoc\r\n- Extra tools: Nothing\r\n\r\n**Additional context**\r\nN/A\n", + "golden_patch": "diff --git a/sphinx/ext/autodoc/__init__.py b/sphinx/ext/autodoc/__init__.py\n--- a/sphinx/ext/autodoc/__init__.py\n+++ b/sphinx/ext/autodoc/__init__.py\n@@ -1702,7 +1702,8 @@ def add_directive_header(self, sig: str) -> None:\n if not self.options.annotation:\n # obtain annotation for this data\n try:\n- annotations = get_type_hints(self.parent)\n+ annotations = get_type_hints(self.parent, None,\n+ self.config.autodoc_type_aliases)\n except NameError:\n # Failed to evaluate ForwardRef (maybe TYPE_CHECKING)\n annotations = safe_getattr(self.parent, '__annotations__', {})\n@@ -2093,7 +2094,8 @@ def add_directive_header(self, sig: str) -> None:\n if not self.options.annotation:\n # obtain type annotation for this attribute\n try:\n- annotations = get_type_hints(self.parent)\n+ annotations = get_type_hints(self.parent, None,\n+ self.config.autodoc_type_aliases)\n except NameError:\n # Failed to evaluate ForwardRef (maybe TYPE_CHECKING)\n annotations = safe_getattr(self.parent, '__annotations__', {})\n", + "test_patch": "diff --git a/tests/roots/test-ext-autodoc/target/annotations.py b/tests/roots/test-ext-autodoc/target/annotations.py\n--- a/tests/roots/test-ext-autodoc/target/annotations.py\n+++ b/tests/roots/test-ext-autodoc/target/annotations.py\n@@ -4,6 +4,9 @@\n \n myint = int\n \n+#: docstring\n+variable: myint\n+\n \n def sum(x: myint, y: myint) -> myint:\n \"\"\"docstring\"\"\"\n@@ -23,3 +26,10 @@ def mult(x: float, y: float) -> float:\n def mult(x, y):\n \"\"\"docstring\"\"\"\n return x, y\n+\n+\n+class Foo:\n+ \"\"\"docstring\"\"\"\n+\n+ #: docstring\n+ attr: myint\ndiff --git a/tests/test_ext_autodoc_configs.py b/tests/test_ext_autodoc_configs.py\n--- a/tests/test_ext_autodoc_configs.py\n+++ b/tests/test_ext_autodoc_configs.py\n@@ -700,6 +700,19 @@ def test_autodoc_type_aliases(app):\n '.. py:module:: target.annotations',\n '',\n '',\n+ '.. py:class:: Foo()',\n+ ' :module: target.annotations',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ ' .. py:attribute:: Foo.attr',\n+ ' :module: target.annotations',\n+ ' :type: int',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n '.. py:function:: mult(x: int, y: int) -> int',\n ' mult(x: float, y: float) -> float',\n ' :module: target.annotations',\n@@ -712,6 +725,13 @@ def test_autodoc_type_aliases(app):\n '',\n ' docstring',\n '',\n+ '',\n+ '.. py:data:: variable',\n+ ' :module: target.annotations',\n+ ' :type: int',\n+ '',\n+ ' docstring',\n+ '',\n ]\n \n # define aliases\n@@ -722,6 +742,19 @@ def test_autodoc_type_aliases(app):\n '.. py:module:: target.annotations',\n '',\n '',\n+ '.. py:class:: Foo()',\n+ ' :module: target.annotations',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ ' .. py:attribute:: Foo.attr',\n+ ' :module: target.annotations',\n+ ' :type: myint',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n '.. 
py:function:: mult(x: myint, y: myint) -> myint',\n ' mult(x: float, y: float) -> float',\n ' :module: target.annotations',\n@@ -734,6 +767,13 @@ def test_autodoc_type_aliases(app):\n '',\n ' docstring',\n '',\n+ '',\n+ '.. py:data:: variable',\n+ ' :module: target.annotations',\n+ ' :type: myint',\n+ '',\n+ ' docstring',\n+ '',\n ]\n \n \n", + "fail_to_pass": "[\"tests/test_ext_autodoc_configs.py::test_autodoc_type_aliases\"]", + "pass_to_pass": "[\"tests/test_ext_autodoc_configs.py::test_autoclass_content_class\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_init\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_both\", \"tests/test_ext_autodoc_configs.py::test_autodoc_inherit_docstrings\", \"tests/test_ext_autodoc_configs.py::test_autodoc_docstring_signature\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_and_docstring_signature_class\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_and_docstring_signature_init\", \"tests/test_ext_autodoc_configs.py::test_autoclass_content_and_docstring_signature_both\", \"tests/test_ext_autodoc_configs.py::test_mocked_module_imports\", \"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_signature\", \"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_none\", \"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_none_for_overload\", \"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_description\", \"tests/test_ext_autodoc_configs.py::test_autodoc_typehints_description_for_invalid_node\", \"tests/test_ext_autodoc_configs.py::test_autodoc_default_options\", \"tests/test_ext_autodoc_configs.py::test_autodoc_default_options_with_values\"]", + "expected_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "DataDocumenter.add_directive_header", + "AttributeDocumenter.add_directive_header" + ] + }, + "test_file_spans": { + "tests/roots/test-ext-autodoc/target/annotations.py": [ + "imports" + ], + "tests/test_ext_autodoc_configs.py": [ + "test_autodoc_type_aliases" + ] + }, + "resolved_by": [ + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "DataDocumenter.add_directive_header", + "AttributeDocumenter.add_directive_header" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "DataDocumenter.add_directive_header", + "AttributeDocumenter.add_directive_header" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "DataDocumenter.add_directive_header", + "AttributeDocumenter.add_directive_header" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "DataDocumenter.add_directive_header", + "AttributeDocumenter.add_directive_header" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "DataDocumenter.add_directive_header", + "AttributeDocumenter.add_directive_header" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "DataDocumenter.add_directive_header", + "AttributeDocumenter.add_directive_header" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-20442", + "repo": "sympy/sympy", + "base_commit": "1abbc0ac3e552cb184317194e5d5c5b9dd8fb640", + "problem_statement": "convert_to seems to combine orthogonal units\nTested in sympy 1.4, not presently in a position to install 1.5+.\r\nSimple example. Consider `J = kg*m**2/s**2 => J*s = kg*m**2/s`. 
The convert_to behavior is odd:\r\n```\r\n>>>convert_to(joule*second,joule)\r\n joule**(7/9)\r\n```\r\nI would expect the unchanged original expression back, an expression in terms of base units, or an error. It appears that convert_to can only readily handle conversions where the full unit expression is valid.\r\n\r\nNote that the following three related examples give sensible results:\r\n```\r\n>>>convert_to(joule*second,joule*second)\r\n joule*second\r\n```\r\n```\r\n>>>convert_to(J*s, kg*m**2/s)\r\n kg*m**2/s\r\n```\r\n```\r\n>>>convert_to(J*s,mins)\r\n J*mins/60\r\n```\n", + "golden_patch": "diff --git a/sympy/physics/units/util.py b/sympy/physics/units/util.py\n--- a/sympy/physics/units/util.py\n+++ b/sympy/physics/units/util.py\n@@ -4,6 +4,7 @@\n \n from sympy import Add, Mul, Pow, Tuple, sympify\n from sympy.core.compatibility import reduce, Iterable, ordered\n+from sympy.matrices.common import NonInvertibleMatrixError\n from sympy.physics.units.dimensions import Dimension\n from sympy.physics.units.prefixes import Prefix\n from sympy.physics.units.quantities import Quantity\n@@ -30,7 +31,11 @@ def _get_conversion_matrix_for_expr(expr, target_units, unit_system):\n camat = Matrix([[dimension_system.get_dimensional_dependencies(i, mark_dimensionless=True).get(j, 0) for i in target_dims] for j in canon_dim_units])\n exprmat = Matrix([dim_dependencies.get(k, 0) for k in canon_dim_units])\n \n- res_exponents = camat.solve_least_squares(exprmat, method=None)\n+ try:\n+ res_exponents = camat.solve(exprmat)\n+ except NonInvertibleMatrixError:\n+ return None\n+\n return res_exponents\n \n \n", + "test_patch": "diff --git a/sympy/physics/units/tests/test_quantities.py b/sympy/physics/units/tests/test_quantities.py\n--- a/sympy/physics/units/tests/test_quantities.py\n+++ b/sympy/physics/units/tests/test_quantities.py\n@@ -1,7 +1,7 @@\n from sympy import (Abs, Add, Function, Number, Rational, S, Symbol,\n diff, exp, integrate, log, sin, sqrt, symbols)\n from sympy.physics.units import (amount_of_substance, convert_to, find_unit,\n- volume, kilometer)\n+ volume, kilometer, joule)\n from sympy.physics.units.definitions import (amu, au, centimeter, coulomb,\n day, foot, grams, hour, inch, kg, km, m, meter, millimeter,\n minute, quart, s, second, speed_of_light, bit,\n@@ -45,6 +45,10 @@ def test_convert_to():\n assert q.convert_to(s) == q\n assert speed_of_light.convert_to(m) == speed_of_light\n \n+ expr = joule*second\n+ conv = convert_to(expr, joule)\n+ assert conv == joule*second\n+\n \n def test_Quantity_definition():\n q = Quantity(\"s10\", abbrev=\"sabbr\")\n", + "fail_to_pass": "[\"test_convert_to\"]", + "pass_to_pass": "[\"test_str_repr\", \"test_eq\", \"test_Quantity_definition\", \"test_abbrev\", \"test_print\", \"test_Quantity_eq\", \"test_add_sub\", \"test_quantity_abs\", \"test_check_unit_consistency\", \"test_mul_div\", \"test_units\", \"test_issue_quart\", \"test_issue_5565\", \"test_find_unit\", \"test_Quantity_derivative\", \"test_quantity_postprocessing\", \"test_factor_and_dimension\", \"test_dimensional_expr_of_derivative\", \"test_get_dimensional_expr_with_function\", \"test_binary_information\", \"test_conversion_with_2_nonstandard_dimensions\", \"test_eval_subs\", \"test_issue_14932\", \"test_issue_14547\"]", + "expected_spans": { + "sympy/physics/units/util.py": [ + "imports", + "_get_conversion_matrix_for_expr" + ] + }, + "test_file_spans": { + "sympy/physics/units/tests/test_quantities.py": [ + "imports", + "test_convert_to" + ] + }, + "resolved_by": [ + { + "name": 
"20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/physics/units/util.py": [ + "convert_to" + ] + }, + "alternative_spans": { + "sympy/physics/units/util.py": [ + "convert_to" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sympy/physics/units/util.py": [ + "_get_conversion_matrix_for_expr" + ] + }, + "alternative_spans": { + "sympy/physics/units/util.py": [ + "_get_conversion_matrix_for_expr" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/physics/units/util.py": [ + "convert_to" + ] + }, + "alternative_spans": { + "sympy/physics/units/util.py": [ + "convert_to" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "sympy/physics/units/util.py": [ + "convert_to" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "sympy/physics/units/util.py": [ + "_get_conversion_matrix_for_expr" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "sympy/physics/units/util.py": [ + "convert_to" + ] + } + } + ] + }, + { + "instance_id": "sphinx-doc__sphinx-8474", + "repo": "sphinx-doc/sphinx", + "base_commit": "3ea1ec84cc610f7a9f4f6b354e264565254923ff", + "problem_statement": "v3.3 upgrade started generating \"WARNING: no number is assigned for table\" warnings\nWe've updated to Sphinx 3.3 in our documentation, and suddenly the following warning started popping up in our builds when we build either `singlehtml` or `latex`.:\r\n\r\n`WARNING: no number is assigned for table:`\r\n\r\nI looked through the changelog but it didn't seem like there was anything related to `numref` that was changed, but perhaps I missed something? Could anyone point me to a change in the numref logic so I can figure out where these warnings are coming from?\n", + "golden_patch": "diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py\n--- a/sphinx/domains/std.py\n+++ b/sphinx/domains/std.py\n@@ -852,8 +852,9 @@ def _resolve_numref_xref(self, env: \"BuildEnvironment\", fromdocname: str,\n if fignumber is None:\n return contnode\n except ValueError:\n- logger.warning(__(\"no number is assigned for %s: %s\"), figtype, labelid,\n- location=node)\n+ logger.warning(__(\"Failed to create a cross reference. Any number is not \"\n+ \"assigned: %s\"),\n+ labelid, location=node)\n return contnode\n \n try:\n", + "test_patch": "diff --git a/tests/test_build_html.py b/tests/test_build_html.py\n--- a/tests/test_build_html.py\n+++ b/tests/test_build_html.py\n@@ -660,7 +660,7 @@ def test_numfig_without_numbered_toctree_warn(app, warning):\n \n warnings = warning.getvalue()\n assert 'index.rst:47: WARNING: numfig is disabled. :numref: is ignored.' not in warnings\n- assert 'index.rst:55: WARNING: no number is assigned for section: index' in warnings\n+ assert 'index.rst:55: WARNING: Failed to create a cross reference. Any number is not assigned: index' in warnings\n assert 'index.rst:56: WARNING: invalid numfig_format: invalid' in warnings\n assert 'index.rst:57: WARNING: invalid numfig_format: Fig %s %s' in warnings\n \n@@ -768,7 +768,7 @@ def test_numfig_with_numbered_toctree_warn(app, warning):\n app.build()\n warnings = warning.getvalue()\n assert 'index.rst:47: WARNING: numfig is disabled. :numref: is ignored.' not in warnings\n- assert 'index.rst:55: WARNING: no number is assigned for section: index' in warnings\n+ assert 'index.rst:55: WARNING: Failed to create a cross reference. 
Any number is not assigned: index' in warnings\n assert 'index.rst:56: WARNING: invalid numfig_format: invalid' in warnings\n assert 'index.rst:57: WARNING: invalid numfig_format: Fig %s %s' in warnings\n \n@@ -873,7 +873,7 @@ def test_numfig_with_prefix_warn(app, warning):\n app.build()\n warnings = warning.getvalue()\n assert 'index.rst:47: WARNING: numfig is disabled. :numref: is ignored.' not in warnings\n- assert 'index.rst:55: WARNING: no number is assigned for section: index' in warnings\n+ assert 'index.rst:55: WARNING: Failed to create a cross reference. Any number is not assigned: index' in warnings\n assert 'index.rst:56: WARNING: invalid numfig_format: invalid' in warnings\n assert 'index.rst:57: WARNING: invalid numfig_format: Fig %s %s' in warnings\n \n@@ -979,7 +979,7 @@ def test_numfig_with_secnum_depth_warn(app, warning):\n app.build()\n warnings = warning.getvalue()\n assert 'index.rst:47: WARNING: numfig is disabled. :numref: is ignored.' not in warnings\n- assert 'index.rst:55: WARNING: no number is assigned for section: index' in warnings\n+ assert 'index.rst:55: WARNING: Failed to create a cross reference. Any number is not assigned: index' in warnings\n assert 'index.rst:56: WARNING: invalid numfig_format: invalid' in warnings\n assert 'index.rst:57: WARNING: invalid numfig_format: Fig %s %s' in warnings\n \n", + "fail_to_pass": "[\"tests/test_build_html.py::test_numfig_without_numbered_toctree_warn\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree_warn\", \"tests/test_build_html.py::test_numfig_with_prefix_warn\", \"tests/test_build_html.py::test_numfig_with_secnum_depth_warn\"]", + "pass_to_pass": "[\"tests/test_build_html.py::test_html4_output\", \"tests/test_build_html.py::test_html5_output[images.html-expect0]\", \"tests/test_build_html.py::test_html5_output[images.html-expect1]\", \"tests/test_build_html.py::test_html5_output[images.html-expect2]\", \"tests/test_build_html.py::test_html5_output[images.html-expect3]\", \"tests/test_build_html.py::test_html5_output[images.html-expect4]\", \"tests/test_build_html.py::test_html5_output[subdir/images.html-expect5]\", \"tests/test_build_html.py::test_html5_output[subdir/images.html-expect6]\", \"tests/test_build_html.py::test_html5_output[subdir/includes.html-expect7]\", \"tests/test_build_html.py::test_html5_output[subdir/includes.html-expect8]\", \"tests/test_build_html.py::test_html5_output[subdir/includes.html-expect9]\", \"tests/test_build_html.py::test_html5_output[subdir/includes.html-expect10]\", \"tests/test_build_html.py::test_html5_output[subdir/includes.html-expect11]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect12]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect13]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect14]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect15]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect16]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect17]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect18]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect19]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect20]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect21]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect22]\", \"tests/test_build_html.py::test_html5_output[includes.html-expect23]\", 
\"tests/test_build_html.py::test_html5_output[includes.html-expect24]\", \"tests/test_build_html.py::test_html5_output[autodoc.html-expect25]\", \"tests/test_build_html.py::test_html5_output[autodoc.html-expect26]\", \"tests/test_build_html.py::test_html5_output[autodoc.html-expect27]\", \"tests/test_build_html.py::test_html5_output[autodoc.html-expect28]\", \"tests/test_build_html.py::test_html5_output[extapi.html-expect29]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect30]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect31]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect32]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect33]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect34]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect35]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect36]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect37]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect38]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect39]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect40]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect41]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect42]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect43]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect44]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect45]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect46]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect47]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect48]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect49]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect50]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect51]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect52]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect53]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect54]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect55]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect56]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect57]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect58]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect59]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect60]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect61]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect62]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect63]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect64]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect66]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect67]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect68]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect69]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect70]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect71]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect72]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect73]\", 
\"tests/test_build_html.py::test_html5_output[markup.html-expect74]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect75]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect76]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect77]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect78]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect80]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect81]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect82]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect83]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect84]\", \"tests/test_build_html.py::test_html5_output[markup.html-expect85]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect86]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect87]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect88]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect89]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect90]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect91]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect92]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect93]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect94]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect95]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect96]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect97]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect98]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect99]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect100]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect101]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect102]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect103]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect104]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect105]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect106]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect107]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect108]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect109]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect110]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect111]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect112]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect113]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect114]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect115]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect116]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect117]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect118]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect119]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect120]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect121]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect122]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect123]\", 
\"tests/test_build_html.py::test_html5_output[objects.html-expect124]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect125]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect126]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect127]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect128]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect129]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect130]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect131]\", \"tests/test_build_html.py::test_html5_output[objects.html-expect132]\", \"tests/test_build_html.py::test_html5_output[index.html-expect133]\", \"tests/test_build_html.py::test_html5_output[index.html-expect134]\", \"tests/test_build_html.py::test_html5_output[index.html-expect137]\", \"tests/test_build_html.py::test_html5_output[index.html-expect138]\", \"tests/test_build_html.py::test_html5_output[index.html-expect139]\", \"tests/test_build_html.py::test_html5_output[index.html-expect140]\", \"tests/test_build_html.py::test_html5_output[index.html-expect141]\", \"tests/test_build_html.py::test_html5_output[index.html-expect142]\", \"tests/test_build_html.py::test_html5_output[index.html-expect143]\", \"tests/test_build_html.py::test_html5_output[index.html-expect144]\", \"tests/test_build_html.py::test_html5_output[index.html-expect145]\", \"tests/test_build_html.py::test_html5_output[index.html-expect146]\", \"tests/test_build_html.py::test_html5_output[index.html-expect147]\", \"tests/test_build_html.py::test_html5_output[index.html-expect148]\", \"tests/test_build_html.py::test_html5_output[bom.html-expect149]\", \"tests/test_build_html.py::test_html5_output[extensions.html-expect150]\", \"tests/test_build_html.py::test_html5_output[extensions.html-expect151]\", \"tests/test_build_html.py::test_html5_output[extensions.html-expect152]\", \"tests/test_build_html.py::test_html5_output[genindex.html-expect153]\", \"tests/test_build_html.py::test_html5_output[genindex.html-expect154]\", \"tests/test_build_html.py::test_html5_output[genindex.html-expect155]\", \"tests/test_build_html.py::test_html5_output[genindex.html-expect156]\", \"tests/test_build_html.py::test_html5_output[genindex.html-expect157]\", \"tests/test_build_html.py::test_html5_output[otherext.html-expect173]\", \"tests/test_build_html.py::test_html5_output[otherext.html-expect174]\", \"tests/test_build_html.py::test_html_parallel\", \"tests/test_build_html.py::test_html_download\", \"tests/test_build_html.py::test_html_download_role\", \"tests/test_build_html.py::test_html_translator\", \"tests/test_build_html.py::test_tocdepth[index.html-expect0]\", \"tests/test_build_html.py::test_tocdepth[index.html-expect1]\", \"tests/test_build_html.py::test_tocdepth[index.html-expect2]\", \"tests/test_build_html.py::test_tocdepth[index.html-expect3]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect4]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect5]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect6]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect7]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect8]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect9]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect10]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect11]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect12]\", 
\"tests/test_build_html.py::test_tocdepth[foo.html-expect13]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect14]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect15]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect16]\", \"tests/test_build_html.py::test_tocdepth[foo.html-expect17]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect18]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect19]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect20]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect21]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect22]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect23]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect24]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect25]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect26]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect27]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect28]\", \"tests/test_build_html.py::test_tocdepth[bar.html-expect29]\", \"tests/test_build_html.py::test_tocdepth[baz.html-expect30]\", \"tests/test_build_html.py::test_tocdepth[baz.html-expect31]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect0]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect1]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect2]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect3]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect4]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect5]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect6]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect7]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect8]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect9]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect10]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect11]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect12]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect13]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect14]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect15]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect16]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect17]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect18]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect19]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect20]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect21]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect22]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect23]\", \"tests/test_build_html.py::test_tocdepth_singlehtml[index.html-expect24]\", \"tests/test_build_html.py::test_numfig_disabled_warn\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect0]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect1]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect2]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect3]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect4]\", 
\"tests/test_build_html.py::test_numfig_disabled[index.html-expect5]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect6]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect7]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect8]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect9]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect10]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect11]\", \"tests/test_build_html.py::test_numfig_disabled[index.html-expect12]\", \"tests/test_build_html.py::test_numfig_disabled[foo.html-expect13]\", \"tests/test_build_html.py::test_numfig_disabled[foo.html-expect14]\", \"tests/test_build_html.py::test_numfig_disabled[foo.html-expect15]\", \"tests/test_build_html.py::test_numfig_disabled[bar.html-expect16]\", \"tests/test_build_html.py::test_numfig_disabled[bar.html-expect17]\", \"tests/test_build_html.py::test_numfig_disabled[bar.html-expect18]\", \"tests/test_build_html.py::test_numfig_disabled[baz.html-expect19]\", \"tests/test_build_html.py::test_numfig_disabled[baz.html-expect20]\", \"tests/test_build_html.py::test_numfig_disabled[baz.html-expect21]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect2]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect3]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect4]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect5]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect6]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect7]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect8]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect9]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect10]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect11]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect12]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect13]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect14]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[index.html-expect15]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[foo.html-expect20]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[foo.html-expect21]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[foo.html-expect22]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[foo.html-expect23]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[foo.html-expect24]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[foo.html-expect25]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[foo.html-expect26]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[foo.html-expect27]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[bar.html-expect31]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[bar.html-expect32]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[bar.html-expect33]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[bar.html-expect34]\", 
\"tests/test_build_html.py::test_numfig_without_numbered_toctree[bar.html-expect35]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[bar.html-expect36]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[baz.html-expect38]\", \"tests/test_build_html.py::test_numfig_without_numbered_toctree[baz.html-expect39]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect2]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect3]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect4]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect5]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect6]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect7]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect8]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect9]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect10]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect11]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect12]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect13]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect14]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[index.html-expect15]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[foo.html-expect20]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[foo.html-expect21]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[foo.html-expect22]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[foo.html-expect23]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[foo.html-expect24]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[foo.html-expect25]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[foo.html-expect26]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[foo.html-expect27]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[bar.html-expect31]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[bar.html-expect32]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[bar.html-expect33]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[bar.html-expect34]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[bar.html-expect35]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[bar.html-expect36]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[baz.html-expect38]\", \"tests/test_build_html.py::test_numfig_with_numbered_toctree[baz.html-expect39]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect2]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect3]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect4]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect5]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect6]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect7]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect8]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect9]\", 
\"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect10]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect11]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect12]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect13]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect14]\", \"tests/test_build_html.py::test_numfig_with_prefix[index.html-expect15]\", \"tests/test_build_html.py::test_numfig_with_prefix[foo.html-expect20]\", \"tests/test_build_html.py::test_numfig_with_prefix[foo.html-expect21]\", \"tests/test_build_html.py::test_numfig_with_prefix[foo.html-expect22]\", \"tests/test_build_html.py::test_numfig_with_prefix[foo.html-expect23]\", \"tests/test_build_html.py::test_numfig_with_prefix[foo.html-expect24]\", \"tests/test_build_html.py::test_numfig_with_prefix[foo.html-expect25]\", \"tests/test_build_html.py::test_numfig_with_prefix[foo.html-expect26]\", \"tests/test_build_html.py::test_numfig_with_prefix[foo.html-expect27]\", \"tests/test_build_html.py::test_numfig_with_prefix[bar.html-expect31]\", \"tests/test_build_html.py::test_numfig_with_prefix[bar.html-expect32]\", \"tests/test_build_html.py::test_numfig_with_prefix[bar.html-expect33]\", \"tests/test_build_html.py::test_numfig_with_prefix[bar.html-expect34]\", \"tests/test_build_html.py::test_numfig_with_prefix[bar.html-expect35]\", \"tests/test_build_html.py::test_numfig_with_prefix[bar.html-expect36]\", \"tests/test_build_html.py::test_numfig_with_prefix[baz.html-expect38]\", \"tests/test_build_html.py::test_numfig_with_prefix[baz.html-expect39]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect2]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect3]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect4]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect5]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect6]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect7]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect8]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect9]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect10]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect11]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect12]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect13]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect14]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[index.html-expect15]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[foo.html-expect20]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[foo.html-expect21]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[foo.html-expect22]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[foo.html-expect23]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[foo.html-expect24]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[foo.html-expect25]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[foo.html-expect26]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[foo.html-expect27]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[bar.html-expect31]\", 
\"tests/test_build_html.py::test_numfig_with_secnum_depth[bar.html-expect32]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[bar.html-expect33]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[bar.html-expect34]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[bar.html-expect35]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[bar.html-expect36]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[baz.html-expect38]\", \"tests/test_build_html.py::test_numfig_with_secnum_depth[baz.html-expect39]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect2]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect3]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect4]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect5]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect6]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect7]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect8]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect9]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect10]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect11]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect12]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect13]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect14]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect15]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect20]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect21]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect22]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect23]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect24]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect25]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect26]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect27]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect31]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect32]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect33]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect34]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect35]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect36]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect38]\", \"tests/test_build_html.py::test_numfig_with_singlehtml[index.html-expect39]\", \"tests/test_build_html.py::test_enumerable_node[index.html-expect3]\", \"tests/test_build_html.py::test_enumerable_node[index.html-expect4]\", \"tests/test_build_html.py::test_enumerable_node[index.html-expect5]\", \"tests/test_build_html.py::test_enumerable_node[index.html-expect6]\", \"tests/test_build_html.py::test_enumerable_node[index.html-expect7]\", \"tests/test_build_html.py::test_enumerable_node[index.html-expect8]\", \"tests/test_build_html.py::test_enumerable_node[index.html-expect9]\", \"tests/test_build_html.py::test_html_assets\", 
\"tests/test_build_html.py::test_html_copy_source\", \"tests/test_build_html.py::test_html_sourcelink_suffix\", \"tests/test_build_html.py::test_html_sourcelink_suffix_same\", \"tests/test_build_html.py::test_html_sourcelink_suffix_empty\", \"tests/test_build_html.py::test_html_entity\", \"tests/test_build_html.py::test_html_inventory\", \"tests/test_build_html.py::test_html_raw_directive\", \"tests/test_build_html.py::test_alternate_stylesheets[index.html-expect0]\", \"tests/test_build_html.py::test_alternate_stylesheets[index.html-expect1]\", \"tests/test_build_html.py::test_alternate_stylesheets[index.html-expect2]\", \"tests/test_build_html.py::test_alternate_stylesheets[index.html-expect3]\", \"tests/test_build_html.py::test_alternate_stylesheets[index.html-expect4]\", \"tests/test_build_html.py::test_alternate_stylesheets[index.html-expect5]\", \"tests/test_build_html.py::test_alternate_stylesheets[index.html-expect6]\", \"tests/test_build_html.py::test_alternate_stylesheets[index.html-expect7]\", \"tests/test_build_html.py::test_html_style\", \"tests/test_build_html.py::test_html_remote_images\", \"tests/test_build_html.py::test_html_sidebar\", \"tests/test_build_html.py::test_html_manpage[index.html-expect0]\", \"tests/test_build_html.py::test_html_manpage[index.html-expect1]\", \"tests/test_build_html.py::test_html_manpage[index.html-expect2]\", \"tests/test_build_html.py::test_html_baseurl\", \"tests/test_build_html.py::test_html_baseurl_and_html_file_suffix\", \"tests/test_build_html.py::test_default_html_math_renderer\", \"tests/test_build_html.py::test_html_math_renderer_is_mathjax\", \"tests/test_build_html.py::test_html_math_renderer_is_imgmath\", \"tests/test_build_html.py::test_html_math_renderer_is_duplicated\", \"tests/test_build_html.py::test_html_math_renderer_is_duplicated2\", \"tests/test_build_html.py::test_html_math_renderer_is_chosen\", \"tests/test_build_html.py::test_html_math_renderer_is_mismatched\", \"tests/test_build_html.py::test_html_pygments_style_default\", \"tests/test_build_html.py::test_html_pygments_style_manually\", \"tests/test_build_html.py::test_html_pygments_for_classic_theme\", \"tests/test_build_html.py::test_html_dark_pygments_style_default\", \"tests/test_build_html.py::test_validate_html_extra_path\", \"tests/test_build_html.py::test_validate_html_static_path\", \"tests/test_build_html.py::test_html_codeblock_linenos_style_inline\"]", + "expected_spans": { + "sphinx/domains/std.py": [ + "StandardDomain._resolve_numref_xref" + ] + }, + "test_file_spans": { + "tests/test_build_html.py": [ + "test_numfig_without_numbered_toctree_warn", + "test_numfig_with_numbered_toctree_warn", + "test_numfig_with_prefix_warn", + "test_numfig_with_secnum_depth_warn" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13710", + "repo": "django/django", + "base_commit": "1bd6a7a0acc11e249fca11c017505ad39f15ebf6", + "problem_statement": "Use Admin Inline verbose_name as default for Inline verbose_name_plural\nDescription\n\t\nDjango allows specification of a verbose_name and a verbose_name_plural for Inline classes in admin views. However, verbose_name_plural for an Inline is not currently based on a specified verbose_name. Instead, it continues to be based on the model name, or an a verbose_name specified in the model's Meta class. 
This was confusing to me initially (I didn't understand why I had to specify both name forms for an Inline if I wanted to overrule the default name), and seems inconsistent with the approach for a model's Meta class (which does automatically base the plural form on a specified verbose_name). I propose that verbose_name_plural for an Inline class should by default be based on the verbose_name for an Inline if that is specified.\nI have written a patch to implement this, including tests. Would be happy to submit that.\n", + "golden_patch": "diff --git a/django/contrib/admin/options.py b/django/contrib/admin/options.py\n--- a/django/contrib/admin/options.py\n+++ b/django/contrib/admin/options.py\n@@ -2037,10 +2037,13 @@ def __init__(self, parent_model, admin_site):\n self.opts = self.model._meta\n self.has_registered_model = admin_site.is_registered(self.model)\n super().__init__()\n+ if self.verbose_name_plural is None:\n+ if self.verbose_name is None:\n+ self.verbose_name_plural = self.model._meta.verbose_name_plural\n+ else:\n+ self.verbose_name_plural = format_lazy('{}s', self.verbose_name)\n if self.verbose_name is None:\n self.verbose_name = self.model._meta.verbose_name\n- if self.verbose_name_plural is None:\n- self.verbose_name_plural = self.model._meta.verbose_name_plural\n \n @property\n def media(self):\n", + "test_patch": "diff --git a/tests/admin_inlines/tests.py b/tests/admin_inlines/tests.py\n--- a/tests/admin_inlines/tests.py\n+++ b/tests/admin_inlines/tests.py\n@@ -967,6 +967,55 @@ def test_extra_inlines_are_not_shown(self):\n class TestVerboseNameInlineForms(TestDataMixin, TestCase):\n factory = RequestFactory()\n \n+ def test_verbose_name_inline(self):\n+ class NonVerboseProfileInline(TabularInline):\n+ model = Profile\n+ verbose_name = 'Non-verbose childs'\n+\n+ class VerboseNameProfileInline(TabularInline):\n+ model = VerboseNameProfile\n+ verbose_name = 'Childs with verbose name'\n+\n+ class VerboseNamePluralProfileInline(TabularInline):\n+ model = VerboseNamePluralProfile\n+ verbose_name = 'Childs with verbose name plural'\n+\n+ class BothVerboseNameProfileInline(TabularInline):\n+ model = BothVerboseNameProfile\n+ verbose_name = 'Childs with both verbose names'\n+\n+ modeladmin = ModelAdmin(ProfileCollection, admin_site)\n+ modeladmin.inlines = [\n+ NonVerboseProfileInline,\n+ VerboseNameProfileInline,\n+ VerboseNamePluralProfileInline,\n+ BothVerboseNameProfileInline,\n+ ]\n+ obj = ProfileCollection.objects.create()\n+ url = reverse('admin:admin_inlines_profilecollection_change', args=(obj.pk,))\n+ request = self.factory.get(url)\n+ request.user = self.superuser\n+ response = modeladmin.changeform_view(request)\n+ self.assertNotContains(response, 'Add another Profile')\n+ # Non-verbose model.\n+ self.assertContains(response, '
<h2>Non-verbose childss</h2>')\n+ self.assertContains(response, 'Add another Non-verbose child')\n+ self.assertNotContains(response, '<h2>Profiles</h2>')\n+ # Model with verbose name.\n+ self.assertContains(response, '<h2>Childs with verbose names</h2>')\n+ self.assertContains(response, 'Add another Childs with verbose name')\n+ self.assertNotContains(response, '<h2>Model with verbose name onlys</h2>')\n+ self.assertNotContains(response, 'Add another Model with verbose name only')\n+ # Model with verbose name plural.\n+ self.assertContains(response, '<h2>Childs with verbose name plurals</h2>')\n+ self.assertContains(response, 'Add another Childs with verbose name plural')\n+ self.assertNotContains(response, '<h2>Model with verbose name plural only</h2>')\n+ # Model with both verbose names.\n+ self.assertContains(response, '<h2>Childs with both verbose namess</h2>')\n+ self.assertContains(response, 'Add another Childs with both verbose names')\n+ self.assertNotContains(response, '<h2>Model with both - plural name</h2>
')\n+ self.assertNotContains(response, 'Add another Model with both - name')\n+\n def test_verbose_name_plural_inline(self):\n class NonVerboseProfileInline(TabularInline):\n model = Profile\n", + "fail_to_pass": "[\"test_verbose_name_inline (admin_inlines.tests.TestVerboseNameInlineForms)\"]", + "pass_to_pass": "[\"Regression for #9362\", \"test_deleting_inline_with_protected_delete_does_not_validate (admin_inlines.tests.TestInlineProtectedOnDelete)\", \"test_all_inline_media (admin_inlines.tests.TestInlineMedia)\", \"test_inline_media_only_base (admin_inlines.tests.TestInlineMedia)\", \"test_inline_media_only_inline (admin_inlines.tests.TestInlineMedia)\", \"test_both_verbose_names_inline (admin_inlines.tests.TestVerboseNameInlineForms)\", \"test_verbose_name_plural_inline (admin_inlines.tests.TestVerboseNameInlineForms)\", \"test_add_url_not_allowed (admin_inlines.tests.TestReadOnlyChangeViewInlinePermissions)\", \"test_extra_inlines_are_not_shown (admin_inlines.tests.TestReadOnlyChangeViewInlinePermissions)\", \"test_get_to_change_url_is_allowed (admin_inlines.tests.TestReadOnlyChangeViewInlinePermissions)\", \"test_inline_delete_buttons_are_not_shown (admin_inlines.tests.TestReadOnlyChangeViewInlinePermissions)\", \"test_inlines_are_rendered_as_read_only (admin_inlines.tests.TestReadOnlyChangeViewInlinePermissions)\", \"test_main_model_is_rendered_as_read_only (admin_inlines.tests.TestReadOnlyChangeViewInlinePermissions)\", \"test_post_to_change_url_not_allowed (admin_inlines.tests.TestReadOnlyChangeViewInlinePermissions)\", \"test_submit_line_shows_only_close_button (admin_inlines.tests.TestReadOnlyChangeViewInlinePermissions)\", \"test_inline_add_fk_add_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_add_fk_noperm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_add_m2m_add_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_add_m2m_noperm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_add_m2m_view_only_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_add_change_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_add_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_all_perms (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_change_del_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_change_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_fk_noperm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_m2m_add_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_m2m_change_perm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_m2m_noperm (admin_inlines.tests.TestInlinePermissions)\", \"test_inline_change_m2m_view_only_perm (admin_inlines.tests.TestInlinePermissions)\", \"Admin inline should invoke local callable when its name is listed in readonly_fields\", \"can_delete should be passed to inlineformset factory.\", \"An object can be created with inlines when it inherits another class.\", \"test_custom_form_tabular_inline_extra_field_label (admin_inlines.tests.TestInline)\", \"A model form with a form field specified (TitleForm.title1) should have\", \"SomeChildModelForm.__init__() overrides the label of a form field.\", \"test_custom_get_extra_form (admin_inlines.tests.TestInline)\", \"test_custom_min_num (admin_inlines.tests.TestInline)\", \"The \\\"View on Site\\\" link is correct for models with a custom primary key\", \"The 
inlines' model field help texts are displayed when using both the\", \"test_inline_editable_pk (admin_inlines.tests.TestInline)\", \"#18263 -- Make sure hidden fields don't get a column in tabular inlines\", \"test_inline_nonauto_noneditable_inherited_pk (admin_inlines.tests.TestInline)\", \"test_inline_nonauto_noneditable_pk (admin_inlines.tests.TestInline)\", \"test_inline_primary (admin_inlines.tests.TestInline)\", \"test_inlines_plural_heading_foreign_key (admin_inlines.tests.TestInline)\", \"Inlines `show_change_link` for registered models when enabled.\", \"Inlines `show_change_link` disabled for unregistered models.\", \"test_inlines_singular_heading_one_to_one (admin_inlines.tests.TestInline)\", \"The \\\"View on Site\\\" link is correct for locales that use thousand\", \"Autogenerated many-to-many inlines are displayed correctly (#13407)\", \"min_num and extra determine number of forms.\", \"Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable\", \"test_non_editable_custom_form_tabular_inline_extra_field_label (admin_inlines.tests.TestInline)\", \"Multiple inlines with related_name='+' have correct form prefixes.\", \"Inlines without change permission shows field inputs on add form.\", \"Bug #13174.\", \"test_stacked_inline_edit_form_contains_has_original_class (admin_inlines.tests.TestInline)\", \"Field names are included in the context to output a field-specific\", \"Inlines `show_change_link` disabled by default.\", \"Tabular inlines use ModelForm.Meta.help_texts and labels for read-only\", \"non_field_errors are displayed correctly, including the correct value\"]", + "expected_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "test_file_spans": { + "tests/admin_inlines/tests.py": [ + "TestVerboseNameInlineForms.test_verbose_name_plural_inline" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ], + "tests/admin_inlines/tests.py": [ + "SeleniumTests", + "SeleniumTests.test_inlines_verbose_name" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + 
} + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ], + "tests/admin_inlines/tests.py": [ + "TestVerboseNameInlineForms.test_verbose_name_plural_inline", + "TestVerboseNameInlineForms.test_both_verbose_names_inline" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ], + "tests/admin_views/test_adminsite.py": [] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/contrib/admin/options.py": [ + "imports", + "InlineModelAdmin.__init__" + ], + "tests/admin_inlines/tests.py": [] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "imports", + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": 
"20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__", + "InlineModelAdmin.media" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__", + "InlineModelAdmin.media" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ], + "tests/admin_views/test_adminsite.py": [] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__", + "ModelAdmin.get_search_results" + ] + }, + "alternative_spans": { + "django/contrib/admin/options.py": [ + "InlineModelAdmin.__init__", + "ModelAdmin.get_search_results" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sphinx-doc__sphinx-8506", + "repo": "sphinx-doc/sphinx", + "base_commit": "e4bd3bd3ddd42c6642ff779a4f7381f219655c2c", + "problem_statement": "Sphinx 3.2 complains about option:: syntax that earlier versions accepted\nSphinx 3.2 complains about use of the option:: directive that earlier versions accepted without complaint.\r\n\r\nThe QEMU documentation includes this:\r\n```\r\n.. option:: [enable=]PATTERN\r\n\r\n Immediately enable events matching *PATTERN*\r\n```\r\n\r\nas part of the documentation of the command line options of one of its programs. 
Earlier versions of Sphinx were fine with this, but Sphinx 3.2 complains:\r\n\r\n```\r\nWarning, treated as error:\r\n../../docs/qemu-option-trace.rst.inc:4:Malformed option description '[enable=]PATTERN', should look like \"opt\", \"-opt args\", \"--opt args\", \"/opt args\" or \"+opt args\"\r\n```\r\n\r\nSphinx ideally shouldn't change in ways that break the building of documentation that worked in older versions, because this makes it unworkably difficult to have documentation that builds with whatever the Linux distro's sphinx-build is.\r\n\r\nThe error message suggests that Sphinx has a very restrictive idea of what option syntax is; it would be better if it just accepted any string, because not all programs and OSes have option syntax that matches the limited list the error message indicates.\r\n\n", + "golden_patch": "diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py\n--- a/sphinx/domains/std.py\n+++ b/sphinx/domains/std.py\n@@ -43,7 +43,7 @@\n \n \n # RE for option descriptions\n-option_desc_re = re.compile(r'((?:/|--|-|\\+)?[^\\s=[]+)(=?\\s*.*)')\n+option_desc_re = re.compile(r'((?:/|--|-|\\+)?[^\\s=]+)(=?\\s*.*)')\n # RE for grammar tokens\n token_re = re.compile(r'`(\\w+)`', re.U)\n \n@@ -197,6 +197,11 @@ def handle_signature(self, sig: str, signode: desc_signature) -> str:\n location=signode)\n continue\n optname, args = m.groups()\n+ if optname.endswith('[') and args.endswith(']'):\n+ # optional value surrounded by brackets (ex. foo[=bar])\n+ optname = optname[:-1]\n+ args = '[' + args\n+\n if count:\n signode += addnodes.desc_addname(', ', ', ')\n signode += addnodes.desc_name(optname, optname)\n", + "test_patch": "diff --git a/tests/test_domain_std.py b/tests/test_domain_std.py\n--- a/tests/test_domain_std.py\n+++ b/tests/test_domain_std.py\n@@ -91,6 +91,28 @@ def test_get_full_qualified_name():\n assert domain.get_full_qualified_name(node) == 'ls.-l'\n \n \n+def test_cmd_option_with_optional_value(app):\n+ text = \".. option:: -j[=N]\"\n+ doctree = restructuredtext.parse(app, text)\n+ assert_node(doctree, (index,\n+ [desc, ([desc_signature, ([desc_name, '-j'],\n+ [desc_addname, '[=N]'])],\n+ [desc_content, ()])]))\n+ objects = list(app.env.get_domain(\"std\").get_objects())\n+ assert ('-j', '-j', 'cmdoption', 'index', 'cmdoption-j', 1) in objects\n+\n+\n+def test_cmd_option_starting_with_bracket(app):\n+ text = \".. option:: [enable=]PATTERN\"\n+ doctree = restructuredtext.parse(app, text)\n+ assert_node(doctree, (index,\n+ [desc, ([desc_signature, ([desc_name, '[enable'],\n+ [desc_addname, '=]PATTERN'])],\n+ [desc_content, ()])]))\n+ objects = list(app.env.get_domain(\"std\").get_objects())\n+ assert ('[enable', '[enable', 'cmdoption', 'index', 'cmdoption-arg-enable', 1) in objects\n+\n+\n def test_glossary(app):\n text = (\".. 
glossary::\\n\"\n \"\\n\"\n", + "fail_to_pass": "[\"tests/test_domain_std.py::test_cmd_option_starting_with_bracket\"]", + "pass_to_pass": "[\"tests/test_domain_std.py::test_process_doc_handle_figure_caption\", \"tests/test_domain_std.py::test_process_doc_handle_table_title\", \"tests/test_domain_std.py::test_get_full_qualified_name\", \"tests/test_domain_std.py::test_cmd_option_with_optional_value\", \"tests/test_domain_std.py::test_glossary\", \"tests/test_domain_std.py::test_glossary_warning\", \"tests/test_domain_std.py::test_glossary_comment\", \"tests/test_domain_std.py::test_glossary_comment2\", \"tests/test_domain_std.py::test_glossary_sorted\", \"tests/test_domain_std.py::test_glossary_alphanumeric\", \"tests/test_domain_std.py::test_glossary_conflicted_labels\", \"tests/test_domain_std.py::test_cmdoption\", \"tests/test_domain_std.py::test_multiple_cmdoptions\", \"tests/test_domain_std.py::test_disabled_docref\"]", + "expected_spans": { + "sphinx/domains/std.py": [ + "imports", + "Cmdoption.handle_signature" + ] + }, + "test_file_spans": { + "tests/test_domain_std.py": [ + "test_glossary" + ] + }, + "resolved_by": [ + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sphinx/domains/std.py": [ + "imports" + ] + }, + "alternative_spans": { + "sphinx/domains/std.py": [ + "imports" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sphinx/domains/std.py": [ + "imports", + "Cmdoption.handle_signature" + ] + }, + "alternative_spans": { + "sphinx/domains/std.py": [ + "imports", + "Cmdoption.handle_signature" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240617_factory_code_droid", + "spans": { + "sphinx/domains/std.py": [ + "imports" + ] + } + } + ] + }, + { + "instance_id": "django__django-13757", + "repo": "django/django", + "base_commit": "3f140dde51c0fe6c350acb7727bbe489a99f0632", + "problem_statement": "Using __isnull=True on a KeyTransform should not match JSON null on SQLite and Oracle\nDescription\n\t\nThe KeyTransformIsNull lookup borrows the logic from HasKey for isnull=False, which is correct. If isnull=True, the query should only match objects that do not have the key. The query is correct for MariaDB, MySQL, and PostgreSQL. However, on SQLite and Oracle, the query also matches objects that have the key with the value null, which is incorrect.\nTo confirm, edit tests.model_fields.test_jsonfield.TestQuerying.test_isnull_key. 
For the first assertion, change\n\t\tself.assertSequenceEqual(\n\t\t\tNullableJSONModel.objects.filter(value__a__isnull=True),\n\t\t\tself.objs[:3] + self.objs[5:],\n\t\t)\nto\n\t\tself.assertSequenceEqual(\n\t\t\tNullableJSONModel.objects.filter(value__j__isnull=True),\n\t\t\tself.objs[:4] + self.objs[5:],\n\t\t)\nThe test previously only checks with value__a which could not catch this behavior because the value is not JSON null.\n", + "golden_patch": "diff --git a/django/db/models/fields/json.py b/django/db/models/fields/json.py\n--- a/django/db/models/fields/json.py\n+++ b/django/db/models/fields/json.py\n@@ -366,14 +366,25 @@ def process_rhs(self, compiler, connection):\n class KeyTransformIsNull(lookups.IsNull):\n # key__isnull=False is the same as has_key='key'\n def as_oracle(self, compiler, connection):\n+ sql, params = HasKey(\n+ self.lhs.lhs,\n+ self.lhs.key_name,\n+ ).as_oracle(compiler, connection)\n if not self.rhs:\n- return HasKey(self.lhs.lhs, self.lhs.key_name).as_oracle(compiler, connection)\n- return super().as_sql(compiler, connection)\n+ return sql, params\n+ # Column doesn't have a key or IS NULL.\n+ lhs, lhs_params, _ = self.lhs.preprocess_lhs(compiler, connection)\n+ return '(NOT %s OR %s IS NULL)' % (sql, lhs), tuple(params) + tuple(lhs_params)\n \n def as_sqlite(self, compiler, connection):\n+ template = 'JSON_TYPE(%s, %%s) IS NULL'\n if not self.rhs:\n- return HasKey(self.lhs.lhs, self.lhs.key_name).as_sqlite(compiler, connection)\n- return super().as_sql(compiler, connection)\n+ template = 'JSON_TYPE(%s, %%s) IS NOT NULL'\n+ return HasKey(self.lhs.lhs, self.lhs.key_name).as_sql(\n+ compiler,\n+ connection,\n+ template=template,\n+ )\n \n \n class KeyTransformIn(lookups.In):\n", + "test_patch": "diff --git a/tests/model_fields/test_jsonfield.py b/tests/model_fields/test_jsonfield.py\n--- a/tests/model_fields/test_jsonfield.py\n+++ b/tests/model_fields/test_jsonfield.py\n@@ -586,6 +586,10 @@ def test_isnull_key(self):\n NullableJSONModel.objects.filter(value__a__isnull=True),\n self.objs[:3] + self.objs[5:],\n )\n+ self.assertSequenceEqual(\n+ NullableJSONModel.objects.filter(value__j__isnull=True),\n+ self.objs[:4] + self.objs[5:],\n+ )\n self.assertSequenceEqual(\n NullableJSONModel.objects.filter(value__a__isnull=False),\n [self.objs[3], self.objs[4]],\n", + "fail_to_pass": "[\"test_isnull_key (model_fields.test_jsonfield.TestQuerying)\"]", + "pass_to_pass": "[\"test_custom_encoder_decoder (model_fields.test_jsonfield.JSONFieldTests)\", \"test_db_check_constraints (model_fields.test_jsonfield.JSONFieldTests)\", \"test_invalid_value (model_fields.test_jsonfield.JSONFieldTests)\", \"test_formfield (model_fields.test_jsonfield.TestFormField)\", \"test_formfield_custom_encoder_decoder (model_fields.test_jsonfield.TestFormField)\", \"test_custom_encoder (model_fields.test_jsonfield.TestValidation)\", \"test_invalid_decoder (model_fields.test_jsonfield.TestValidation)\", \"test_invalid_encoder (model_fields.test_jsonfield.TestValidation)\", \"test_validation_error (model_fields.test_jsonfield.TestValidation)\", \"test_deconstruct (model_fields.test_jsonfield.TestMethods)\", \"test_deconstruct_custom_encoder_decoder (model_fields.test_jsonfield.TestMethods)\", \"test_get_transforms (model_fields.test_jsonfield.TestMethods)\", \"test_key_transform_text_lookup_mixin_non_key_transform (model_fields.test_jsonfield.TestMethods)\", \"test_dumping (model_fields.test_jsonfield.TestSerialization)\", \"test_loading (model_fields.test_jsonfield.TestSerialization)\", 
\"test_xml_serialization (model_fields.test_jsonfield.TestSerialization)\", \"test_dict (model_fields.test_jsonfield.TestSaveLoad)\", \"test_json_null_different_from_sql_null (model_fields.test_jsonfield.TestSaveLoad)\", \"test_list (model_fields.test_jsonfield.TestSaveLoad)\", \"test_null (model_fields.test_jsonfield.TestSaveLoad)\", \"test_primitives (model_fields.test_jsonfield.TestSaveLoad)\", \"test_realistic_object (model_fields.test_jsonfield.TestSaveLoad)\", \"test_contained_by_unsupported (model_fields.test_jsonfield.TestQuerying)\", \"test_contains_unsupported (model_fields.test_jsonfield.TestQuerying)\", \"test_deep_lookup_array (model_fields.test_jsonfield.TestQuerying)\", \"test_deep_lookup_mixed (model_fields.test_jsonfield.TestQuerying)\", \"test_deep_lookup_objs (model_fields.test_jsonfield.TestQuerying)\", \"test_deep_lookup_transform (model_fields.test_jsonfield.TestQuerying)\", \"test_deep_values (model_fields.test_jsonfield.TestQuerying)\", \"test_exact (model_fields.test_jsonfield.TestQuerying)\", \"test_exact_complex (model_fields.test_jsonfield.TestQuerying)\", \"test_expression_wrapper_key_transform (model_fields.test_jsonfield.TestQuerying)\", \"test_has_any_keys (model_fields.test_jsonfield.TestQuerying)\", \"test_has_key (model_fields.test_jsonfield.TestQuerying)\", \"test_has_key_deep (model_fields.test_jsonfield.TestQuerying)\", \"test_has_key_list (model_fields.test_jsonfield.TestQuerying)\", \"test_has_key_null_value (model_fields.test_jsonfield.TestQuerying)\", \"test_has_keys (model_fields.test_jsonfield.TestQuerying)\", \"test_isnull (model_fields.test_jsonfield.TestQuerying)\", \"test_isnull_key_or_none (model_fields.test_jsonfield.TestQuerying)\", \"test_join_key_transform_annotation_expression (model_fields.test_jsonfield.TestQuerying)\", \"test_key_endswith (model_fields.test_jsonfield.TestQuerying)\", \"test_key_escape (model_fields.test_jsonfield.TestQuerying)\", \"test_key_icontains (model_fields.test_jsonfield.TestQuerying)\", \"test_key_iendswith (model_fields.test_jsonfield.TestQuerying)\", \"test_key_iexact (model_fields.test_jsonfield.TestQuerying)\", \"test_key_in (model_fields.test_jsonfield.TestQuerying)\", \"test_key_iregex (model_fields.test_jsonfield.TestQuerying)\", \"test_key_istartswith (model_fields.test_jsonfield.TestQuerying)\", \"test_key_quoted_string (model_fields.test_jsonfield.TestQuerying)\", \"test_key_regex (model_fields.test_jsonfield.TestQuerying)\", \"test_key_sql_injection_escape (model_fields.test_jsonfield.TestQuerying)\", \"test_key_startswith (model_fields.test_jsonfield.TestQuerying)\", \"test_key_transform_annotation_expression (model_fields.test_jsonfield.TestQuerying)\", \"test_key_transform_expression (model_fields.test_jsonfield.TestQuerying)\", \"test_key_transform_raw_expression (model_fields.test_jsonfield.TestQuerying)\", \"test_key_values (model_fields.test_jsonfield.TestQuerying)\", \"test_lookup_exclude (model_fields.test_jsonfield.TestQuerying)\", \"test_lookup_exclude_nonexistent_key (model_fields.test_jsonfield.TestQuerying)\", \"test_lookups_with_key_transform (model_fields.test_jsonfield.TestQuerying)\", \"test_nested_key_transform_annotation_expression (model_fields.test_jsonfield.TestQuerying)\", \"test_nested_key_transform_expression (model_fields.test_jsonfield.TestQuerying)\", \"test_nested_key_transform_on_subquery (model_fields.test_jsonfield.TestQuerying)\", \"test_nested_key_transform_raw_expression (model_fields.test_jsonfield.TestQuerying)\", \"test_none_key 
(model_fields.test_jsonfield.TestQuerying)\", \"test_none_key_and_exact_lookup (model_fields.test_jsonfield.TestQuerying)\", \"test_none_key_exclude (model_fields.test_jsonfield.TestQuerying)\", \"test_obj_subquery_lookup (model_fields.test_jsonfield.TestQuerying)\", \"test_order_grouping_custom_decoder (model_fields.test_jsonfield.TestQuerying)\", \"test_ordering_by_transform (model_fields.test_jsonfield.TestQuerying)\", \"test_ordering_grouping_by_count (model_fields.test_jsonfield.TestQuerying)\", \"test_ordering_grouping_by_key_transform (model_fields.test_jsonfield.TestQuerying)\", \"test_shallow_list_lookup (model_fields.test_jsonfield.TestQuerying)\", \"test_shallow_lookup_obj_target (model_fields.test_jsonfield.TestQuerying)\", \"test_shallow_obj_lookup (model_fields.test_jsonfield.TestQuerying)\", \"test_usage_in_subquery (model_fields.test_jsonfield.TestQuerying)\"]", + "expected_spans": { + "django/db/models/fields/json.py": [ + "KeyTransformIsNull", + "KeyTransformIsNull.as_oracle", + "KeyTransformIsNull.as_sqlite" + ] + }, + "test_file_spans": { + "tests/model_fields/test_jsonfield.py": [ + "TestQuerying.test_isnull_key" + ] + }, + "resolved_by": [ + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/db/models/fields/json.py": [ + "KeyTransformIsNull", + "KeyTransformIsNull.as_oracle", + "KeyTransformIsNull.as_sqlite" + ] + }, + "alternative_spans": { + "django/db/models/fields/json.py": [ + "KeyTransformIsNull", + "KeyTransformIsNull.as_oracle", + "KeyTransformIsNull.as_sqlite" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/fields/json.py": [ + "KeyTransformIsNull", + "KeyTransformIsNull.as_oracle", + "KeyTransformIsNull.as_sqlite" + ] + }, + "alternative_spans": { + "django/db/models/fields/json.py": [ + "KeyTransformIsNull", + "KeyTransformIsNull.as_oracle", + "KeyTransformIsNull.as_sqlite" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/db/models/fields/json.py": [ + "KeyTransformIsNull", + "KeyTransformIsNull.as_oracle", + "KeyTransformIsNull.as_sqlite", + "KeyTransformIn" + ] + }, + "alternative_spans": { + "django/db/models/fields/json.py": [ + "KeyTransformIsNull", + "KeyTransformIsNull.as_oracle", + "KeyTransformIsNull.as_sqlite", + "KeyTransformIn" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13768", + "repo": "django/django", + "base_commit": "965d2d95c630939b53eb60d9c169f5dfc77ee0c6", + "problem_statement": "Log exceptions handled in Signal.send_robust()\nDescription\n\t\nAs pointed out by \u200bHaki Benita on Twitter, by default Signal.send_robust() doesn't have any log messages for exceptions raised in receivers. Since Django logs exceptions in other similar situations, such as missing template variables, I think it would be worth adding a logger.exception() call in the except clause of send_robust() . Users would then see such exceptions in their error handling tools, e.g. Sentry, and be able to figure out what action to take from there. 
Ultimately any *expected* exception should be caught with a try in the receiver function.\n", + "golden_patch": "diff --git a/django/dispatch/dispatcher.py b/django/dispatch/dispatcher.py\n--- a/django/dispatch/dispatcher.py\n+++ b/django/dispatch/dispatcher.py\n@@ -1,3 +1,4 @@\n+import logging\n import threading\n import warnings\n import weakref\n@@ -5,6 +6,8 @@\n from django.utils.deprecation import RemovedInDjango40Warning\n from django.utils.inspect import func_accepts_kwargs\n \n+logger = logging.getLogger('django.dispatch')\n+\n \n def _make_id(target):\n if hasattr(target, '__func__'):\n@@ -208,6 +211,12 @@ def send_robust(self, sender, **named):\n try:\n response = receiver(signal=self, sender=sender, **named)\n except Exception as err:\n+ logger.error(\n+ 'Error calling %s in Signal.send_robust() (%s)',\n+ receiver.__qualname__,\n+ err,\n+ exc_info=err,\n+ )\n responses.append((receiver, err))\n else:\n responses.append((receiver, response))\n", + "test_patch": "diff --git a/tests/dispatch/tests.py b/tests/dispatch/tests.py\n--- a/tests/dispatch/tests.py\n+++ b/tests/dispatch/tests.py\n@@ -165,13 +165,28 @@ def test_send_robust_fail(self):\n def fails(val, **kwargs):\n raise ValueError('this')\n a_signal.connect(fails)\n- result = a_signal.send_robust(sender=self, val=\"test\")\n- err = result[0][1]\n- self.assertIsInstance(err, ValueError)\n- self.assertEqual(err.args, ('this',))\n- self.assertTrue(hasattr(err, '__traceback__'))\n- self.assertIsInstance(err.__traceback__, TracebackType)\n- a_signal.disconnect(fails)\n+ try:\n+ with self.assertLogs('django.dispatch', 'ERROR') as cm:\n+ result = a_signal.send_robust(sender=self, val='test')\n+ err = result[0][1]\n+ self.assertIsInstance(err, ValueError)\n+ self.assertEqual(err.args, ('this',))\n+ self.assertIs(hasattr(err, '__traceback__'), True)\n+ self.assertIsInstance(err.__traceback__, TracebackType)\n+\n+ log_record = cm.records[0]\n+ self.assertEqual(\n+ log_record.getMessage(),\n+ 'Error calling '\n+ 'DispatcherTests.test_send_robust_fail..fails in '\n+ 'Signal.send_robust() (this)',\n+ )\n+ self.assertIsNotNone(log_record.exc_info)\n+ _, exc_value, _ = log_record.exc_info\n+ self.assertIsInstance(exc_value, ValueError)\n+ self.assertEqual(str(exc_value), 'this')\n+ finally:\n+ a_signal.disconnect(fails)\n self.assertTestIsClean(a_signal)\n \n def test_disconnection(self):\n", + "fail_to_pass": "[\"test_send_robust_fail (dispatch.tests.DispatcherTests)\"]", + "pass_to_pass": "[\"test_receiver_signal_list (dispatch.tests.ReceiverTestCase)\", \"test_receiver_single_signal (dispatch.tests.ReceiverTestCase)\", \"test_cached_garbaged_collected (dispatch.tests.DispatcherTests)\", \"test_cannot_connect_no_kwargs (dispatch.tests.DispatcherTests)\", \"test_cannot_connect_non_callable (dispatch.tests.DispatcherTests)\", \"test_disconnection (dispatch.tests.DispatcherTests)\", \"test_garbage_collected (dispatch.tests.DispatcherTests)\", \"test_has_listeners (dispatch.tests.DispatcherTests)\", \"test_multiple_registration (dispatch.tests.DispatcherTests)\", \"test_send (dispatch.tests.DispatcherTests)\", \"test_send_connected_no_sender (dispatch.tests.DispatcherTests)\", \"test_send_different_no_sender (dispatch.tests.DispatcherTests)\", \"test_send_no_receivers (dispatch.tests.DispatcherTests)\", \"test_send_robust_ignored_sender (dispatch.tests.DispatcherTests)\", \"test_send_robust_no_receivers (dispatch.tests.DispatcherTests)\", \"test_send_robust_success (dispatch.tests.DispatcherTests)\", \"test_uid_registration 
(dispatch.tests.DispatcherTests)\", \"test_values_returned_by_disconnection (dispatch.tests.DispatcherTests)\"]", + "expected_spans": { + "django/dispatch/dispatcher.py": [ + "imports", + "Signal.send_robust" + ] + }, + "test_file_spans": { + "tests/dispatch/tests.py": [ + "DispatcherTests.test_send_robust_fail" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/dispatch/dispatcher.py": [ + "imports", + "Signal.send_robust" + ] + }, + "alternative_spans": { + "django/dispatch/dispatcher.py": [ + "imports", + "Signal.send_robust" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-20590", + "repo": "sympy/sympy", + "base_commit": "cffd4e0f86fefd4802349a9f9b19ed70934ea354", + "problem_statement": "Symbol instances have __dict__ since 1.7?\nIn version 1.6.2 Symbol instances had no `__dict__` attribute\r\n```python\r\n>>> sympy.Symbol('s').__dict__\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n in \r\n----> 1 sympy.Symbol('s').__dict__\r\n\r\nAttributeError: 'Symbol' object has no attribute '__dict__'\r\n>>> sympy.Symbol('s').__slots__\r\n('name',)\r\n```\r\n\r\nThis changes in 1.7 where `sympy.Symbol('s').__dict__` now exists (and returns an empty dict)\r\nI may misinterpret this, but given the purpose of `__slots__`, I assume this is a bug, introduced because some parent class accidentally stopped defining `__slots__`.\n", + "golden_patch": "diff --git a/sympy/core/_print_helpers.py b/sympy/core/_print_helpers.py\n--- a/sympy/core/_print_helpers.py\n+++ b/sympy/core/_print_helpers.py\n@@ -17,6 +17,11 @@ class Printable:\n This also adds support for LaTeX printing in jupyter notebooks.\n \"\"\"\n \n+ # Since this class is used as a mixin we set empty slots. That means that\n+ # instances of any subclasses that use slots will not need to have a\n+ # __dict__.\n+ __slots__ = ()\n+\n # Note, we always use the default ordering (lex) in __str__ and __repr__,\n # regardless of the global setting. 
See issue 5487.\n def __str__(self):\n", + "test_patch": "diff --git a/sympy/core/tests/test_basic.py b/sympy/core/tests/test_basic.py\n--- a/sympy/core/tests/test_basic.py\n+++ b/sympy/core/tests/test_basic.py\n@@ -34,6 +34,12 @@ def test_structure():\n assert bool(b1)\n \n \n+def test_immutable():\n+ assert not hasattr(b1, '__dict__')\n+ with raises(AttributeError):\n+ b1.x = 1\n+\n+\n def test_equality():\n instances = [b1, b2, b3, b21, Basic(b1, b1, b1), Basic]\n for i, b_i in enumerate(instances):\n", + "fail_to_pass": "[\"test_immutable\"]", + "pass_to_pass": "[\"test__aresame\", \"test_structure\", \"test_equality\", \"test_matches_basic\", \"test_has\", \"test_subs\", \"test_subs_with_unicode_symbols\", \"test_atoms\", \"test_free_symbols_empty\", \"test_doit\", \"test_S\", \"test_xreplace\", \"test_preorder_traversal\", \"test_sorted_args\", \"test_call\", \"test_rewrite\", \"test_literal_evalf_is_number_is_zero_is_comparable\", \"test_as_Basic\", \"test_atomic\", \"test_as_dummy\", \"test_canonical_variables\"]", + "expected_spans": { + "sympy/core/_print_helpers.py": [ + "Printable" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_basic.py": [ + "test_equality" + ] + }, + "resolved_by": [ + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/core/_print_helpers.py": [ + "docstring" + ] + }, + "alternative_spans": { + "sympy/core/_print_helpers.py": [ + "docstring" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/core/_print_helpers.py": [ + "Printable" + ] + }, + "alternative_spans": { + "sympy/core/_print_helpers.py": [ + "Printable" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/core/_print_helpers.py": [ + "Printable" + ], + "sympy/core/core.py": [ + "BasicMeta.__init__" + ] + }, + "alternative_spans": { + "sympy/core/_print_helpers.py": [ + "Printable" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240908_infant_gpt4o", + "spans": { + "sympy/core/_print_helpers.py": [ + "docstring" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-20639", + "repo": "sympy/sympy", + "base_commit": "eb926a1d0c1158bf43f01eaf673dc84416b5ebb1", + "problem_statement": "inaccurate rendering of pi**(1/E)\nThis claims to be version 1.5.dev; I just merged from the project master, so I hope this is current. 
I didn't notice this bug among others in printing.pretty.\r\n\r\n```\r\nIn [52]: pi**(1/E) \r\nOut[52]: \r\n-1___\r\n\u2572\u2571 \u03c0 \r\n\r\n```\r\nLaTeX and str not fooled:\r\n```\r\nIn [53]: print(latex(pi**(1/E))) \r\n\\pi^{e^{-1}}\r\n\r\nIn [54]: str(pi**(1/E)) \r\nOut[54]: 'pi**exp(-1)'\r\n```\r\n\n", + "golden_patch": "diff --git a/sympy/printing/pretty/pretty.py b/sympy/printing/pretty/pretty.py\n--- a/sympy/printing/pretty/pretty.py\n+++ b/sympy/printing/pretty/pretty.py\n@@ -1902,12 +1902,12 @@ def _print_Mul(self, product):\n return prettyForm.__mul__(*a)/prettyForm.__mul__(*b)\n \n # A helper function for _print_Pow to print x**(1/n)\n- def _print_nth_root(self, base, expt):\n+ def _print_nth_root(self, base, root):\n bpretty = self._print(base)\n \n # In very simple cases, use a single-char root sign\n if (self._settings['use_unicode_sqrt_char'] and self._use_unicode\n- and expt is S.Half and bpretty.height() == 1\n+ and root == 2 and bpretty.height() == 1\n and (bpretty.width() == 1\n or (base.is_Integer and base.is_nonnegative))):\n return prettyForm(*bpretty.left('\\N{SQUARE ROOT}'))\n@@ -1915,14 +1915,13 @@ def _print_nth_root(self, base, expt):\n # Construct root sign, start with the \\/ shape\n _zZ = xobj('/', 1)\n rootsign = xobj('\\\\', 1) + _zZ\n- # Make exponent number to put above it\n- if isinstance(expt, Rational):\n- exp = str(expt.q)\n- if exp == '2':\n- exp = ''\n- else:\n- exp = str(expt.args[0])\n- exp = exp.ljust(2)\n+ # Constructing the number to put on root\n+ rpretty = self._print(root)\n+ # roots look bad if they are not a single line\n+ if rpretty.height() != 1:\n+ return self._print(base)**self._print(1/root)\n+ # If power is half, no number should appear on top of root sign\n+ exp = '' if root == 2 else str(rpretty).ljust(2)\n if len(exp) > 2:\n rootsign = ' '*(len(exp) - 2) + rootsign\n # Stack the exponent\n@@ -1954,8 +1953,9 @@ def _print_Pow(self, power):\n if e is S.NegativeOne:\n return prettyForm(\"1\")/self._print(b)\n n, d = fraction(e)\n- if n is S.One and d.is_Atom and not e.is_Integer and self._settings['root_notation']:\n- return self._print_nth_root(b, e)\n+ if n is S.One and d.is_Atom and not e.is_Integer and (e.is_Rational or d.is_Symbol) \\\n+ and self._settings['root_notation']:\n+ return self._print_nth_root(b, d)\n if e.is_Rational and e < 0:\n return prettyForm(\"1\")/self._print(Pow(b, -e, evaluate=False))\n \n", + "test_patch": "diff --git a/sympy/printing/pretty/tests/test_pretty.py b/sympy/printing/pretty/tests/test_pretty.py\n--- a/sympy/printing/pretty/tests/test_pretty.py\n+++ b/sympy/printing/pretty/tests/test_pretty.py\n@@ -5942,7 +5942,11 @@ def test_PrettyPoly():\n \n def test_issue_6285():\n assert pretty(Pow(2, -5, evaluate=False)) == '1 \\n--\\n 5\\n2 '\n- assert pretty(Pow(x, (1/pi))) == 'pi___\\n\\\\/ x '\n+ assert pretty(Pow(x, (1/pi))) == \\\n+ ' 1 \\n'\\\n+ ' --\\n'\\\n+ ' pi\\n'\\\n+ 'x '\n \n \n def test_issue_6359():\n@@ -7205,6 +7209,51 @@ def test_is_combining():\n [False, True, False, False]\n \n \n+def test_issue_17616():\n+ assert pretty(pi**(1/exp(1))) == \\\n+ ' / -1\\\\\\n'\\\n+ ' \\e /\\n'\\\n+ 'pi '\n+\n+ assert upretty(pi**(1/exp(1))) == \\\n+ ' \u239b -1\u239e\\n'\\\n+ ' \u239d\u212f \u23a0\\n'\\\n+ '\u03c0 '\n+\n+ assert pretty(pi**(1/pi)) == \\\n+ ' 1 \\n'\\\n+ ' --\\n'\\\n+ ' pi\\n'\\\n+ 'pi '\n+\n+ assert upretty(pi**(1/pi)) == \\\n+ ' 1\\n'\\\n+ ' \u2500\\n'\\\n+ ' \u03c0\\n'\\\n+ '\u03c0 '\n+\n+ assert pretty(pi**(1/EulerGamma)) == \\\n+ ' 1 \\n'\\\n+ ' ----------\\n'\\\n+ ' 
EulerGamma\\n'\\\n+ 'pi '\n+\n+ assert upretty(pi**(1/EulerGamma)) == \\\n+ ' 1\\n'\\\n+ ' \u2500\\n'\\\n+ ' \u03b3\\n'\\\n+ '\u03c0 '\n+\n+ z = Symbol(\"x_17\")\n+ assert upretty(7**(1/z)) == \\\n+ 'x\u2081\u2087___\\n'\\\n+ ' \u2572\u2571 7 '\n+\n+ assert pretty(7**(1/z)) == \\\n+ 'x_17___\\n'\\\n+ ' \\\\/ 7 '\n+\n+\n def test_issue_17857():\n assert pretty(Range(-oo, oo)) == '{..., -1, 0, 1, ...}'\n assert pretty(Range(oo, -oo, -1)) == '{..., 1, 0, -1, ...}'\n", + "fail_to_pass": "[\"test_issue_6285\", \"test_issue_17616\"]", + "pass_to_pass": "[\"test_pretty_ascii_str\", \"test_pretty_unicode_str\", \"test_upretty_greek\", \"test_upretty_multiindex\", \"test_upretty_sub_super\", \"test_upretty_subs_missing_in_24\", \"test_missing_in_2X_issue_9047\", \"test_upretty_modifiers\", \"test_pretty_Cycle\", \"test_pretty_Permutation\", \"test_pretty_basic\", \"test_negative_fractions\", \"test_issue_5524\", \"test_pretty_ordering\", \"test_EulerGamma\", \"test_GoldenRatio\", \"test_pretty_relational\", \"test_Assignment\", \"test_AugmentedAssignment\", \"test_pretty_rational\", \"test_pretty_functions\", \"test_pretty_sqrt\", \"test_pretty_sqrt_char_knob\", \"test_pretty_sqrt_longsymbol_no_sqrt_char\", \"test_pretty_KroneckerDelta\", \"test_pretty_product\", \"test_pretty_Lambda\", \"test_pretty_TransferFunction\", \"test_pretty_Series\", \"test_pretty_Parallel\", \"test_pretty_Feedback\", \"test_pretty_order\", \"test_pretty_derivatives\", \"test_pretty_integrals\", \"test_pretty_matrix\", \"test_pretty_ndim_arrays\", \"test_tensor_TensorProduct\", \"test_diffgeom_print_WedgeProduct\", \"test_Adjoint\", \"test_pretty_Trace_issue_9044\", \"test_MatrixSlice\", \"test_MatrixExpressions\", \"test_pretty_dotproduct\", \"test_pretty_piecewise\", \"test_pretty_ITE\", \"test_pretty_seq\", \"test_any_object_in_sequence\", \"test_print_builtin_set\", \"test_pretty_sets\", \"test_pretty_SetExpr\", \"test_pretty_ImageSet\", \"test_pretty_ConditionSet\", \"test_pretty_ComplexRegion\", \"test_pretty_Union_issue_10414\", \"test_pretty_Intersection_issue_10414\", \"test_ProductSet_exponent\", \"test_ProductSet_parenthesis\", \"test_ProductSet_prod_char_issue_10413\", \"test_pretty_sequences\", \"test_pretty_FourierSeries\", \"test_pretty_FormalPowerSeries\", \"test_pretty_limits\", \"test_pretty_ComplexRootOf\", \"test_pretty_RootSum\", \"test_GroebnerBasis\", \"test_pretty_UniversalSet\", \"test_pretty_Boolean\", \"test_pretty_Domain\", \"test_pretty_prec\", \"test_pprint\", \"test_pretty_class\", \"test_pretty_no_wrap_line\", \"test_settings\", \"test_pretty_sum\", \"test_units\", \"test_pretty_Subs\", \"test_gammas\", \"test_beta\", \"test_function_subclass_different_name\", \"test_SingularityFunction\", \"test_deltas\", \"test_hyper\", \"test_meijerg\", \"test_noncommutative\", \"test_pretty_special_functions\", \"test_pretty_geometry\", \"test_expint\", \"test_elliptic_functions\", \"test_RandomDomain\", \"test_PrettyPoly\", \"test_issue_6359\", \"test_issue_6739\", \"test_complicated_symbol_unchanged\", \"test_categories\", \"test_PrettyModules\", \"test_QuotientRing\", \"test_Homomorphism\", \"test_Tr\", \"test_pretty_Add\", \"test_issue_7179\", \"test_issue_7180\", \"test_pretty_Complement\", \"test_pretty_SymmetricDifference\", \"test_pretty_Contains\", \"test_issue_8292\", \"test_issue_4335\", \"test_issue_8344\", \"test_issue_6324\", \"test_issue_7927\", \"test_issue_6134\", \"test_issue_9877\", \"test_issue_13651\", \"test_pretty_primenu\", \"test_pretty_primeomega\", \"test_pretty_Mod\", 
\"test_issue_11801\", \"test_pretty_UnevaluatedExpr\", \"test_issue_10472\", \"test_MatrixElement_printing\", \"test_issue_12675\", \"test_MatrixSymbol_printing\", \"test_degree_printing\", \"test_vector_expr_pretty_printing\", \"test_pretty_print_tensor_expr\", \"test_pretty_print_tensor_partial_deriv\", \"test_issue_15560\", \"test_print_lerchphi\", \"test_issue_15583\", \"test_matrixSymbolBold\", \"test_center_accent\", \"test_imaginary_unit\", \"test_str_special_matrices\", \"test_pretty_misc_functions\", \"test_hadamard_power\", \"test_issue_17258\", \"test_is_combining\", \"test_issue_17857\", \"test_issue_18272\", \"test_Str\"]", + "expected_spans": { + "sympy/printing/pretty/pretty.py": [ + "PrettyPrinter._print_nth_root", + "PrettyPrinter._print_Pow" + ] + }, + "test_file_spans": { + "sympy/printing/pretty/tests/test_pretty.py": [ + "test_issue_6285", + "test_issue_17857" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "sphinx-doc__sphinx-8595", + "repo": "sphinx-doc/sphinx", + "base_commit": "b19bce971e82f2497d67fdacdeca8db08ae0ba56", + "problem_statement": "autodoc: empty __all__ attribute is ignored\n**Describe the bug**\r\nautodoc: empty `__all__` attribute is ignored\r\n\r\n**To Reproduce**\r\n```\r\n# example.py\r\n__all__ = []\r\n\r\n\r\ndef foo():\r\n \"docstring\"\r\n\r\n\r\ndef bar():\r\n \"docstring\"\r\n\r\n\r\ndef baz():\r\n \"docstring\"\r\n```\r\n```\r\n# index.rst\r\n.. automodule:: example\r\n :members:\r\n```\r\n\r\nAll foo, bar, and baz are shown.\r\n\r\n**Expected behavior**\r\nNo entries should be shown because `__all__` is empty.\r\n\r\n**Your project**\r\nNo\r\n\r\n**Screenshots**\r\nNo\r\n\r\n**Environment info**\r\n- OS: Mac\r\n- Python version: 3.9.1\r\n- Sphinx version: HEAD of 3.x\r\n- Sphinx extensions: sphinx.ext.autodoc\r\n- Extra tools: No\r\n\r\n**Additional context**\r\nNo\n", + "golden_patch": "diff --git a/sphinx/ext/autodoc/__init__.py b/sphinx/ext/autodoc/__init__.py\n--- a/sphinx/ext/autodoc/__init__.py\n+++ b/sphinx/ext/autodoc/__init__.py\n@@ -1074,7 +1074,7 @@ def get_module_members(self) -> Dict[str, ObjectMember]:\n def get_object_members(self, want_all: bool) -> Tuple[bool, ObjectMembers]:\n members = self.get_module_members()\n if want_all:\n- if not self.__all__:\n+ if self.__all__ is None:\n # for implicit module members, check __module__ to avoid\n # documenting imported objects\n return True, list(members.values())\n", + "test_patch": "diff --git a/tests/roots/test-ext-autodoc/target/empty_all.py b/tests/roots/test-ext-autodoc/target/empty_all.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/roots/test-ext-autodoc/target/empty_all.py\n@@ -0,0 +1,16 @@\n+\"\"\"\n+docsting of empty_all module.\n+\"\"\"\n+__all__ = []\n+\n+\n+def foo():\n+ \"\"\"docstring\"\"\"\n+\n+\n+def bar():\n+ \"\"\"docstring\"\"\"\n+\n+\n+def baz():\n+ \"\"\"docstring\"\"\"\ndiff --git a/tests/test_ext_autodoc_automodule.py b/tests/test_ext_autodoc_automodule.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/test_ext_autodoc_automodule.py\n@@ -0,0 +1,27 @@\n+\"\"\"\n+ test_ext_autodoc_autocmodule\n+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+ Test the autodoc extension. 
This tests mainly the Documenters; the auto\n+ directives are tested in a test source file translated by test_build.\n+\n+ :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.\n+ :license: BSD, see LICENSE for details.\n+\"\"\"\n+\n+import pytest\n+\n+from .test_ext_autodoc import do_autodoc\n+\n+\n+@pytest.mark.sphinx('html', testroot='ext-autodoc')\n+def test_empty_all(app):\n+ options = {'members': True}\n+ actual = do_autodoc(app, 'module', 'target.empty_all', options)\n+ assert list(actual) == [\n+ '',\n+ '.. py:module:: target.empty_all',\n+ '',\n+ 'docsting of empty_all module.',\n+ '',\n+ ]\n", + "fail_to_pass": "[\"tests/test_ext_autodoc_automodule.py::test_empty_all\"]", + "pass_to_pass": "[]", + "expected_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "test_file_spans": {}, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_module_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_module_members" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.import_object", + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.import_object", + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "imports", + "identity", + "_All", + "_All.__contains__", + "_Empty", + "_Empty.__contains__", + "members_option", + "members_set_option", + "exclude_members_option", + "inherited_members_option", + "member_order_option", + "annotation_option", + "bool_option", + "merge_special_members_option", + "merge_members_option", + "cut_lines", + "between", + "Options", + "Options.__getattr__", + "ObjectMember.__new__", + "ObjectMember.__init__", + "Documenter", + "Documenter.get_attr", + "Documenter.can_document_member", + "Documenter.__init__", + "Documenter.documenters", + "Documenter.add_line", + "Documenter.resolve_name", + "Documenter.parse_name", + "Documenter.import_object", + "Documenter.get_real_modname", + "Documenter.check_module", + "Documenter.format_args", + "Documenter.format_name", + "Documenter._call_format_args", + "Documenter.format_signature", + "Documenter.add_directive_header", + "Documenter.get_doc", + "Documenter.process_doc", + "Documenter.get_sourcename", + "Documenter.add_content", + "Documenter.get_object_members", + 
"Documenter.filter_members", + "Documenter.document_members", + "Documenter.sort_members", + "Documenter.generate", + "ModuleDocumenter", + "ModuleDocumenter.__init__", + "ModuleDocumenter.can_document_member", + "ModuleDocumenter.resolve_name", + "ModuleDocumenter.parse_name", + "ModuleDocumenter.import_object", + "ModuleDocumenter.add_directive_header", + "ModuleDocumenter.get_module_members", + "ModuleDocumenter.get_object_members", + "ModuleDocumenter.sort_members", + "ModuleLevelDocumenter.resolve_name", + "ClassLevelDocumenter.resolve_name", + "DocstringSignatureMixin", + "DocstringSignatureMixin._find_signature", + "DocstringSignatureMixin.get_doc", + "DocstringSignatureMixin.format_signature", + "DocstringStripSignatureMixin.format_signature", + "FunctionDocumenter", + "FunctionDocumenter.can_document_member", + "FunctionDocumenter.format_args", + "FunctionDocumenter.document_members", + "FunctionDocumenter.add_directive_header", + "FunctionDocumenter.format_signature", + "FunctionDocumenter.annotate_to_first_argument", + "DecoratorDocumenter", + "DecoratorDocumenter.format_args", + "impl:26", + "ClassDocumenter", + "ClassDocumenter.__init__", + "ClassDocumenter.can_document_member", + "ClassDocumenter.import_object", + "ClassDocumenter._get_signature", + "ClassDocumenter.format_args", + "ClassDocumenter.format_signature", + "ClassDocumenter.get_overloaded_signatures", + "ClassDocumenter.add_directive_header", + "ClassDocumenter.get_object_members", + "ClassDocumenter.get_doc", + "ClassDocumenter.add_content", + "ClassDocumenter.document_members", + "ClassDocumenter.generate", + "ExceptionDocumenter", + "ExceptionDocumenter.can_document_member", + "DataDocumenterMixinBase", + "DataDocumenterMixinBase.should_suppress_directive_header", + "DataDocumenterMixinBase.should_suppress_value_header", + "DataDocumenterMixinBase.update_content", + "GenericAliasMixin.should_suppress_directive_header", + "GenericAliasMixin.update_content", + "NewTypeMixin.should_suppress_directive_header", + "NewTypeMixin.update_content", + "TypeVarMixin.should_suppress_directive_header", + "TypeVarMixin.get_doc", + "TypeVarMixin.update_content", + "UninitializedGlobalVariableMixin.import_object", + "UninitializedGlobalVariableMixin.should_suppress_value_header", + "UninitializedGlobalVariableMixin.get_doc", + "DataDocumenter", + "DataDocumenter.can_document_member", + "DataDocumenter.update_annotations", + "DataDocumenter.import_object", + "DataDocumenter.should_suppress_value_header", + "DataDocumenter.add_directive_header", + "DataDocumenter.document_members", + "DataDocumenter.get_real_modname", + "DataDocumenter.get_module_comment", + "DataDocumenter.get_doc", + "DataDocumenter.add_content", + "NewTypeDataDocumenter", + "NewTypeDataDocumenter.can_document_member", + "MethodDocumenter", + "MethodDocumenter.can_document_member", + "MethodDocumenter.import_object", + "MethodDocumenter.format_args", + "MethodDocumenter.add_directive_header", + "MethodDocumenter.document_members", + "MethodDocumenter.format_signature", + "MethodDocumenter.annotate_to_first_argument", + "NonDataDescriptorMixin", + "NonDataDescriptorMixin.should_suppress_value_header", + "NonDataDescriptorMixin.get_doc", + "SlotsMixin.isslotsattribute", + "SlotsMixin.import_object", + "SlotsMixin.should_suppress_directive_header", + "SlotsMixin.get_doc", + "SlotsMixin", + "RuntimeInstanceAttributeMixin", + "RuntimeInstanceAttributeMixin.is_runtime_instance_attribute", + "RuntimeInstanceAttributeMixin.import_object", + 
"RuntimeInstanceAttributeMixin.should_suppress_value_header", + "UninitializedInstanceAttributeMixin.is_uninitialized_instance_attribute", + "UninitializedInstanceAttributeMixin.import_object", + "UninitializedInstanceAttributeMixin.should_suppress_value_header", + "AttributeDocumenter", + "AttributeDocumenter.is_function_or_method", + "AttributeDocumenter.can_document_member", + "AttributeDocumenter.document_members", + "AttributeDocumenter.isinstanceattribute", + "AttributeDocumenter.update_annotations", + "AttributeDocumenter.import_object", + "AttributeDocumenter.get_real_modname", + "AttributeDocumenter.should_suppress_value_header", + "AttributeDocumenter.add_directive_header", + "AttributeDocumenter.get_attribute_comment", + "AttributeDocumenter.get_doc", + "AttributeDocumenter.add_content", + "PropertyDocumenter", + "PropertyDocumenter.can_document_member", + "PropertyDocumenter.document_members", + "PropertyDocumenter.get_real_modname", + "PropertyDocumenter.add_directive_header", + "NewTypeAttributeDocumenter", + "NewTypeAttributeDocumenter.can_document_member", + "get_documenters", + "autodoc_attrgetter", + "migrate_autodoc_member_order", + "imports:21", + "setup" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "imports", + "identity", + "_All", + "_All.__contains__", + "_Empty", + "_Empty.__contains__", + "members_option", + "members_set_option", + "exclude_members_option", + "inherited_members_option", + "member_order_option", + "annotation_option", + "bool_option", + "merge_special_members_option", + "merge_members_option", + "cut_lines", + "between", + "Options", + "Options.__getattr__", + "ObjectMember.__new__", + "ObjectMember.__init__", + "Documenter", + "Documenter.get_attr", + "Documenter.can_document_member", + "Documenter.__init__", + "Documenter.documenters", + "Documenter.add_line", + "Documenter.resolve_name", + "Documenter.parse_name", + "Documenter.import_object", + "Documenter.get_real_modname", + "Documenter.check_module", + "Documenter.format_args", + "Documenter.format_name", + "Documenter._call_format_args", + "Documenter.format_signature", + "Documenter.add_directive_header", + "Documenter.get_doc", + "Documenter.process_doc", + "Documenter.get_sourcename", + "Documenter.add_content", + "Documenter.get_object_members", + "Documenter.filter_members", + "Documenter.document_members", + "Documenter.sort_members", + "Documenter.generate", + "ModuleDocumenter", + "ModuleDocumenter.__init__", + "ModuleDocumenter.can_document_member", + "ModuleDocumenter.resolve_name", + "ModuleDocumenter.parse_name", + "ModuleDocumenter.import_object", + "ModuleDocumenter.add_directive_header", + "ModuleDocumenter.get_module_members", + "ModuleDocumenter.get_object_members", + "ModuleDocumenter.sort_members", + "ModuleLevelDocumenter.resolve_name", + "ClassLevelDocumenter.resolve_name", + "DocstringSignatureMixin", + "DocstringSignatureMixin._find_signature", + "DocstringSignatureMixin.get_doc", + "DocstringSignatureMixin.format_signature", + "DocstringStripSignatureMixin.format_signature", + "FunctionDocumenter", + "FunctionDocumenter.can_document_member", + "FunctionDocumenter.format_args", + "FunctionDocumenter.document_members", + "FunctionDocumenter.add_directive_header", + "FunctionDocumenter.format_signature", + "FunctionDocumenter.annotate_to_first_argument", + "DecoratorDocumenter", + "DecoratorDocumenter.format_args", + "impl:26", + "ClassDocumenter", + "ClassDocumenter.__init__", + "ClassDocumenter.can_document_member", + 
"ClassDocumenter.import_object", + "ClassDocumenter._get_signature", + "ClassDocumenter.format_args", + "ClassDocumenter.format_signature", + "ClassDocumenter.get_overloaded_signatures", + "ClassDocumenter.add_directive_header", + "ClassDocumenter.get_object_members", + "ClassDocumenter.get_doc", + "ClassDocumenter.add_content", + "ClassDocumenter.document_members", + "ClassDocumenter.generate", + "ExceptionDocumenter", + "ExceptionDocumenter.can_document_member", + "DataDocumenterMixinBase", + "DataDocumenterMixinBase.should_suppress_directive_header", + "DataDocumenterMixinBase.should_suppress_value_header", + "DataDocumenterMixinBase.update_content", + "GenericAliasMixin.should_suppress_directive_header", + "GenericAliasMixin.update_content", + "NewTypeMixin.should_suppress_directive_header", + "NewTypeMixin.update_content", + "TypeVarMixin.should_suppress_directive_header", + "TypeVarMixin.get_doc", + "TypeVarMixin.update_content", + "UninitializedGlobalVariableMixin.import_object", + "UninitializedGlobalVariableMixin.should_suppress_value_header", + "UninitializedGlobalVariableMixin.get_doc", + "DataDocumenter", + "DataDocumenter.can_document_member", + "DataDocumenter.update_annotations", + "DataDocumenter.import_object", + "DataDocumenter.should_suppress_value_header", + "DataDocumenter.add_directive_header", + "DataDocumenter.document_members", + "DataDocumenter.get_real_modname", + "DataDocumenter.get_module_comment", + "DataDocumenter.get_doc", + "DataDocumenter.add_content", + "NewTypeDataDocumenter", + "NewTypeDataDocumenter.can_document_member", + "MethodDocumenter", + "MethodDocumenter.can_document_member", + "MethodDocumenter.import_object", + "MethodDocumenter.format_args", + "MethodDocumenter.add_directive_header", + "MethodDocumenter.document_members", + "MethodDocumenter.format_signature", + "MethodDocumenter.annotate_to_first_argument", + "NonDataDescriptorMixin", + "NonDataDescriptorMixin.should_suppress_value_header", + "NonDataDescriptorMixin.get_doc", + "SlotsMixin.isslotsattribute", + "SlotsMixin.import_object", + "SlotsMixin.should_suppress_directive_header", + "SlotsMixin.get_doc", + "SlotsMixin", + "RuntimeInstanceAttributeMixin", + "RuntimeInstanceAttributeMixin.is_runtime_instance_attribute", + "RuntimeInstanceAttributeMixin.import_object", + "RuntimeInstanceAttributeMixin.should_suppress_value_header", + "UninitializedInstanceAttributeMixin.is_uninitialized_instance_attribute", + "UninitializedInstanceAttributeMixin.import_object", + "UninitializedInstanceAttributeMixin.should_suppress_value_header", + "AttributeDocumenter", + "AttributeDocumenter.is_function_or_method", + "AttributeDocumenter.can_document_member", + "AttributeDocumenter.document_members", + "AttributeDocumenter.isinstanceattribute", + "AttributeDocumenter.update_annotations", + "AttributeDocumenter.import_object", + "AttributeDocumenter.get_real_modname", + "AttributeDocumenter.should_suppress_value_header", + "AttributeDocumenter.add_directive_header", + "AttributeDocumenter.get_attribute_comment", + "AttributeDocumenter.get_doc", + "AttributeDocumenter.add_content", + "PropertyDocumenter", + "PropertyDocumenter.can_document_member", + "PropertyDocumenter.document_members", + "PropertyDocumenter.get_real_modname", + "PropertyDocumenter.add_directive_header", + "NewTypeAttributeDocumenter", + "NewTypeAttributeDocumenter.can_document_member", + "get_documenters", + "autodoc_attrgetter", + "migrate_autodoc_member_order", + "imports:21", + "setup" + ] + } + }, + { + "name": 
"20240820_honeycomb", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "Documenter.document_members", + "ModuleDocumenter.get_object_members", + "ModuleDocumenter.sort_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "Documenter.document_members", + "ModuleDocumenter.get_object_members", + "ModuleDocumenter.sort_members" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.import_object", + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.import_object", + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "Documenter.get_object_members", + "Documenter.filter_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "Documenter.get_object_members", + "Documenter.filter_members" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "Documenter.get_object_members", + "Documenter.filter_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "Documenter.get_object_members", + "Documenter.filter_members" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": 
"20240617_moatless_gpt4o", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.import_object", + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.import_object", + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ], + "tests/test_ext_autodoc.py": [ + "test_autodoc_process_signature_typehints" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_object_members" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240524_opencsg_starship_gpt4", + "spans": { + "sphinx/ext/autodoc/__init__.py": [ + "ModuleDocumenter.get_module_members" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "sphinx/ext/autodoc/__init__.py": [ + "Documenter.get_object_members", + "Documenter.filter_members" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "sphinx/ext/autodoc/__init__.py": [ + "Documenter.get_object_members", + "Documenter.filter_members" + ] + } + } + ] + }, + { + "instance_id": "sphinx-doc__sphinx-8627", + "repo": "sphinx-doc/sphinx", + "base_commit": "332d80ba8433aea41c3709fa52737ede4405072b", + "problem_statement": "autodoc isn't able to resolve struct.Struct type annotations\n**Describe the bug**\r\nIf `struct.Struct` is declared in any type annotations, I get `class reference target not found: Struct`\r\n\r\n**To Reproduce**\r\nSimple `index.rst`\r\n```\r\nHello World\r\n===========\r\n\r\ncode docs\r\n=========\r\n\r\n.. automodule:: helloworld.helloworld\r\n```\r\n\r\nSimple `helloworld.py`\r\n```\r\nimport struct\r\nimport pathlib\r\n\r\ndef consume_struct(_: struct.Struct) -> None:\r\n pass\r\n\r\ndef make_struct() -> struct.Struct:\r\n mystruct = struct.Struct('HH')\r\n return mystruct\r\n\r\ndef make_path() -> pathlib.Path:\r\n return pathlib.Path()\r\n```\r\n\r\nCommand line:\r\n```\r\npython3 -m sphinx -b html docs/ doc-out -nvWT\r\n```\r\n\r\n**Expected behavior**\r\nIf you comment out the 2 functions that have `Struct` type annotations, you'll see that `pathlib.Path` resolves fine and shows up in the resulting documentation. I'd expect that `Struct` would also resolve correctly.\r\n\r\n**Your project**\r\nn/a\r\n\r\n**Screenshots**\r\nn/a\r\n\r\n**Environment info**\r\n- OS: Ubuntu 18.04, 20.04\r\n- Python version: 3.8.2\r\n- Sphinx version: 3.2.1\r\n- Sphinx extensions: 'sphinx.ext.autodoc',\r\n 'sphinx.ext.autosectionlabel',\r\n 'sphinx.ext.intersphinx',\r\n 'sphinx.ext.doctest',\r\n 'sphinx.ext.todo'\r\n- Extra tools: \r\n\r\n**Additional context**\r\n\r\n\r\n- [e.g. 
URL or Ticket]\r\n\r\n\n", + "golden_patch": "diff --git a/sphinx/util/typing.py b/sphinx/util/typing.py\n--- a/sphinx/util/typing.py\n+++ b/sphinx/util/typing.py\n@@ -10,6 +10,7 @@\n \n import sys\n import typing\n+from struct import Struct\n from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, TypeVar, Union\n \n from docutils import nodes\n@@ -94,6 +95,9 @@ def restify(cls: Optional[\"Type\"]) -> str:\n return ':obj:`None`'\n elif cls is Ellipsis:\n return '...'\n+ elif cls is Struct:\n+ # Before Python 3.9, struct.Struct class has incorrect __module__.\n+ return ':class:`struct.Struct`'\n elif inspect.isNewType(cls):\n return ':class:`%s`' % cls.__name__\n elif cls.__module__ in ('__builtin__', 'builtins'):\n@@ -305,6 +309,9 @@ def stringify(annotation: Any) -> str:\n return annotation.__qualname__\n elif annotation is Ellipsis:\n return '...'\n+ elif annotation is Struct:\n+ # Before Python 3.9, struct.Struct class has incorrect __module__.\n+ return 'struct.Struct'\n \n if sys.version_info >= (3, 7): # py37+\n return _stringify_py37(annotation)\n", + "test_patch": "diff --git a/tests/test_util_typing.py b/tests/test_util_typing.py\n--- a/tests/test_util_typing.py\n+++ b/tests/test_util_typing.py\n@@ -10,6 +10,7 @@\n \n import sys\n from numbers import Integral\n+from struct import Struct\n from typing import (Any, Callable, Dict, Generator, List, NewType, Optional, Tuple, TypeVar,\n Union)\n \n@@ -43,6 +44,7 @@ def test_restify():\n assert restify(str) == \":class:`str`\"\n assert restify(None) == \":obj:`None`\"\n assert restify(Integral) == \":class:`numbers.Integral`\"\n+ assert restify(Struct) == \":class:`struct.Struct`\"\n assert restify(Any) == \":obj:`Any`\"\n \n \n@@ -124,6 +126,7 @@ def test_stringify():\n assert stringify(str) == \"str\"\n assert stringify(None) == \"None\"\n assert stringify(Integral) == \"numbers.Integral\"\n+ assert restify(Struct) == \":class:`struct.Struct`\"\n assert stringify(Any) == \"Any\"\n \n \n", + "fail_to_pass": "[\"tests/test_util_typing.py::test_restify\", \"tests/test_util_typing.py::test_stringify\"]", + "pass_to_pass": "[\"tests/test_util_typing.py::test_restify_type_hints_containers\", \"tests/test_util_typing.py::test_restify_type_hints_Callable\", \"tests/test_util_typing.py::test_restify_type_hints_Union\", \"tests/test_util_typing.py::test_restify_type_hints_typevars\", \"tests/test_util_typing.py::test_restify_type_hints_custom_class\", \"tests/test_util_typing.py::test_restify_type_hints_alias\", \"tests/test_util_typing.py::test_restify_type_ForwardRef\", \"tests/test_util_typing.py::test_restify_broken_type_hints\", \"tests/test_util_typing.py::test_stringify_type_hints_containers\", \"tests/test_util_typing.py::test_stringify_Annotated\", \"tests/test_util_typing.py::test_stringify_type_hints_string\", \"tests/test_util_typing.py::test_stringify_type_hints_Callable\", \"tests/test_util_typing.py::test_stringify_type_hints_Union\", \"tests/test_util_typing.py::test_stringify_type_hints_typevars\", \"tests/test_util_typing.py::test_stringify_type_hints_custom_class\", \"tests/test_util_typing.py::test_stringify_type_hints_alias\", \"tests/test_util_typing.py::test_stringify_broken_type_hints\"]", + "expected_spans": { + "sphinx/util/typing.py": [ + "imports", + "restify", + "stringify" + ] + }, + "test_file_spans": { + "tests/test_util_typing.py": [ + "imports", + "test_restify", + "test_stringify" + ] + }, + "resolved_by": [ + { + "name": "20240617_factory_code_droid", + "updated_spans": { + 
"sphinx/util/typing.py": [ + "imports", + "restify", + "stringify" + ] + }, + "alternative_spans": { + "sphinx/util/typing.py": [ + "imports", + "restify", + "stringify" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sphinx/util/typing.py": [ + "restify", + "stringify" + ] + }, + "alternative_spans": { + "sphinx/util/typing.py": [ + "restify", + "stringify" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sphinx/util/typing.py": [ + "imports", + "restify", + "stringify" + ] + }, + "alternative_spans": { + "sphinx/util/typing.py": [ + "imports", + "restify", + "stringify" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240811_gru", + "spans": { + "sphinx/util/typing.py": [ + "restify", + "stringify" + ] + } + } + ] + }, + { + "instance_id": "sphinx-doc__sphinx-8713", + "repo": "sphinx-doc/sphinx", + "base_commit": "3ed7590ed411bd93b26098faab4f23619cdb2267", + "problem_statement": "napoleon_use_param should also affect \"other parameters\" section\nSubject: napoleon_use_param should also affect \"other parameters\" section\r\n\r\n### Problem\r\nCurrently, napoleon always renders the Other parameters section as if napoleon_use_param was False, see source\r\n```\r\n def _parse_other_parameters_section(self, section):\r\n # type: (unicode) -> List[unicode]\r\n return self._format_fields(_('Other Parameters'), self._consume_fields())\r\n\r\n def _parse_parameters_section(self, section):\r\n # type: (unicode) -> List[unicode]\r\n fields = self._consume_fields()\r\n if self._config.napoleon_use_param:\r\n return self._format_docutils_params(fields)\r\n else:\r\n return self._format_fields(_('Parameters'), fields)\r\n```\r\nwhereas it would make sense that this section should follow the same formatting rules as the Parameters section.\r\n\r\n#### Procedure to reproduce the problem\r\n```\r\nIn [5]: print(str(sphinx.ext.napoleon.NumpyDocstring(\"\"\"\\ \r\n ...: Parameters \r\n ...: ---------- \r\n ...: x : int \r\n ...: \r\n ...: Other parameters \r\n ...: ---------------- \r\n ...: y: float \r\n ...: \"\"\"))) \r\n:param x:\r\n:type x: int\r\n\r\n:Other Parameters: **y** (*float*)\r\n```\r\n\r\nNote the difference in rendering.\r\n\r\n#### Error logs / results\r\nSee above.\r\n\r\n#### Expected results\r\n```\r\n:param x:\r\n:type x: int\r\n\r\n:Other Parameters: // Or some other kind of heading.\r\n:param: y\r\n:type y: float\r\n```\r\n\r\nAlternatively another separate config value could be introduced, but that seems a bit overkill.\r\n\r\n### Reproducible project / your project\r\nN/A\r\n\r\n### Environment info\r\n- OS: Linux\r\n- Python version: 3.7\r\n- Sphinx version: 1.8.1\r\n\n", + "golden_patch": "diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py\n--- a/sphinx/ext/napoleon/docstring.py\n+++ b/sphinx/ext/napoleon/docstring.py\n@@ -682,7 +682,13 @@ def _parse_notes_section(self, section: str) -> List[str]:\n return self._parse_generic_section(_('Notes'), use_admonition)\n \n def _parse_other_parameters_section(self, section: str) -> List[str]:\n- return self._format_fields(_('Other Parameters'), self._consume_fields())\n+ if self._config.napoleon_use_param:\n+ # Allow to declare multiple parameters at once (ex: x, y: int)\n+ fields = self._consume_fields(multiple=True)\n+ return self._format_docutils_params(fields)\n+ else:\n+ fields = self._consume_fields()\n+ return self._format_fields(_('Other Parameters'), fields)\n \n def _parse_parameters_section(self, section: str) -> 
List[str]:\n if self._config.napoleon_use_param:\n", + "test_patch": "diff --git a/tests/test_ext_napoleon_docstring.py b/tests/test_ext_napoleon_docstring.py\n--- a/tests/test_ext_napoleon_docstring.py\n+++ b/tests/test_ext_napoleon_docstring.py\n@@ -1441,12 +1441,18 @@ def test_parameters_with_class_reference(self):\n ----------\n param1 : :class:`MyClass ` instance\n \n+Other Parameters\n+----------------\n+param2 : :class:`MyClass ` instance\n+\n \"\"\"\n \n config = Config(napoleon_use_param=False)\n actual = str(NumpyDocstring(docstring, config))\n expected = \"\"\"\\\n :Parameters: **param1** (:class:`MyClass ` instance)\n+\n+:Other Parameters: **param2** (:class:`MyClass ` instance)\n \"\"\"\n self.assertEqual(expected, actual)\n \n@@ -1455,6 +1461,9 @@ def test_parameters_with_class_reference(self):\n expected = \"\"\"\\\n :param param1:\n :type param1: :class:`MyClass ` instance\n+\n+:param param2:\n+:type param2: :class:`MyClass ` instance\n \"\"\"\n self.assertEqual(expected, actual)\n \n", + "fail_to_pass": "[\"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_parameters_with_class_reference\"]", + "pass_to_pass": "[\"tests/test_ext_napoleon_docstring.py::NamedtupleSubclassTest::test_attributes_docstring\", \"tests/test_ext_napoleon_docstring.py::InlineAttributeTest::test_class_data_member\", \"tests/test_ext_napoleon_docstring.py::InlineAttributeTest::test_class_data_member_inline\", \"tests/test_ext_napoleon_docstring.py::InlineAttributeTest::test_class_data_member_inline_no_type\", \"tests/test_ext_napoleon_docstring.py::InlineAttributeTest::test_class_data_member_inline_ref_in_type\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_attributes_with_class_reference\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_code_block_in_returns_section\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_colon_in_return_type\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_custom_generic_sections\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_docstrings\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_keywords_with_types\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_kwargs_in_arguments\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_list_in_parameter_description\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_noindex\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_parameters_with_class_reference\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_pep526_annotations\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_raises_types\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_section_header_formatting\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_sphinx_admonitions\", \"tests/test_ext_napoleon_docstring.py::GoogleDocstringTest::test_xrefs_in_return_type\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_colon_in_return_type\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_convert_numpy_type_spec\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_docstrings\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_list_in_parameter_description\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_multiple_parameters\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_parameter_types\", 
\"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_parameters_without_class_reference\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_raises_types\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_recombine_set_tokens\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_recombine_set_tokens_invalid\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_return_types\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_section_header_underline_length\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_see_also_refs\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_sphinx_admonitions\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_token_type\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_tokenize_type_spec\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_type_preprocessor\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_underscore_in_attribute\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_underscore_in_attribute_strip_signature_backslash\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_xrefs_in_return_type\", \"tests/test_ext_napoleon_docstring.py::NumpyDocstringTest::test_yield_types\", \"tests/test_ext_napoleon_docstring.py::TestNumpyDocstring::test_escape_args_and_kwargs[x,\", \"tests/test_ext_napoleon_docstring.py::TestNumpyDocstring::test_escape_args_and_kwargs[*args,\", \"tests/test_ext_napoleon_docstring.py::TestNumpyDocstring::test_escape_args_and_kwargs[*x,\", \"tests/test_ext_napoleon_docstring.py::TestNumpyDocstring::test_pep526_annotations\"]", + "expected_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "test_file_spans": { + "tests/test_ext_napoleon_docstring.py": [ + "NumpyDocstringTest.test_parameters_with_class_reference" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section", + "_token_type" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section", + "_token_type" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", 
+ "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "imports", + "GoogleDocstring", + "GoogleDocstring.__init__", + "GoogleDocstring.__str__", + "GoogleDocstring.lines", + "GoogleDocstring._consume_indented_block", + "GoogleDocstring._consume_contiguous", + "GoogleDocstring._consume_empty", + "GoogleDocstring._consume_field", + "GoogleDocstring._consume_fields", + "GoogleDocstring._consume_inline_attribute", + "GoogleDocstring._consume_returns_section", + "GoogleDocstring._consume_usage_section", + "GoogleDocstring._consume_section_header", + "GoogleDocstring._consume_to_end", + "GoogleDocstring._consume_to_next_section", + "GoogleDocstring._dedent", + "GoogleDocstring._escape_args_and_kwargs", + "GoogleDocstring._fix_field_desc", + "GoogleDocstring._format_admonition", + "GoogleDocstring._format_block", + "GoogleDocstring._format_docutils_params", + "GoogleDocstring._format_field", + "GoogleDocstring._format_fields", + "GoogleDocstring._get_current_indent", + "GoogleDocstring._get_indent", + "GoogleDocstring._get_initial_indent", + "GoogleDocstring._get_min_indent", + "GoogleDocstring._indent", + "GoogleDocstring._is_indented", + "GoogleDocstring._is_list", + "GoogleDocstring._is_section_header", + "GoogleDocstring._is_section_break", + "GoogleDocstring._load_custom_sections", + "GoogleDocstring._parse", + "GoogleDocstring._parse_admonition", + "GoogleDocstring._parse_attribute_docstring", + "GoogleDocstring._parse_attributes_section", + "GoogleDocstring._parse_examples_section", + "GoogleDocstring._parse_custom_generic_section", + "GoogleDocstring._parse_usage_section", + "GoogleDocstring._parse_generic_section", + "GoogleDocstring._parse_keyword_arguments_section", + "GoogleDocstring._parse_methods_section", + "GoogleDocstring._parse_notes_section", + "GoogleDocstring._parse_other_parameters_section", + "GoogleDocstring._parse_parameters_section", + "GoogleDocstring._parse_raises_section", + "GoogleDocstring._parse_receives_section", + "GoogleDocstring._parse_references_section", + "GoogleDocstring._parse_returns_section", + "GoogleDocstring._parse_see_also_section", + "GoogleDocstring._parse_warns_section", + "GoogleDocstring._parse_yields_section", + "GoogleDocstring._partition_field_on_colon", + "GoogleDocstring._qualify_name", + "GoogleDocstring._strip_empty", + "GoogleDocstring._lookup_annotation", + "_recombine_set_tokens", + "_tokenize_type_spec", + "_token_type", + "_convert_numpy_type_spec", + "NumpyDocstring.__init__", + "NumpyDocstring._get_location", + "NumpyDocstring._escape_args_and_kwargs", + 
"NumpyDocstring._consume_field", + "NumpyDocstring._consume_returns_section", + "NumpyDocstring._consume_section_header", + "NumpyDocstring._is_section_break", + "NumpyDocstring._is_section_header", + "NumpyDocstring._parse_see_also_section", + "NumpyDocstring._parse_numpydoc_see_also_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "imports", + "GoogleDocstring", + "GoogleDocstring.__init__", + "GoogleDocstring.__str__", + "GoogleDocstring.lines", + "GoogleDocstring._consume_indented_block", + "GoogleDocstring._consume_contiguous", + "GoogleDocstring._consume_empty", + "GoogleDocstring._consume_field", + "GoogleDocstring._consume_fields", + "GoogleDocstring._consume_inline_attribute", + "GoogleDocstring._consume_returns_section", + "GoogleDocstring._consume_usage_section", + "GoogleDocstring._consume_section_header", + "GoogleDocstring._consume_to_end", + "GoogleDocstring._consume_to_next_section", + "GoogleDocstring._dedent", + "GoogleDocstring._escape_args_and_kwargs", + "GoogleDocstring._fix_field_desc", + "GoogleDocstring._format_admonition", + "GoogleDocstring._format_block", + "GoogleDocstring._format_docutils_params", + "GoogleDocstring._format_field", + "GoogleDocstring._format_fields", + "GoogleDocstring._get_current_indent", + "GoogleDocstring._get_indent", + "GoogleDocstring._get_initial_indent", + "GoogleDocstring._get_min_indent", + "GoogleDocstring._indent", + "GoogleDocstring._is_indented", + "GoogleDocstring._is_list", + "GoogleDocstring._is_section_header", + "GoogleDocstring._is_section_break", + "GoogleDocstring._load_custom_sections", + "GoogleDocstring._parse", + "GoogleDocstring._parse_admonition", + "GoogleDocstring._parse_attribute_docstring", + "GoogleDocstring._parse_attributes_section", + "GoogleDocstring._parse_examples_section", + "GoogleDocstring._parse_custom_generic_section", + "GoogleDocstring._parse_usage_section", + "GoogleDocstring._parse_generic_section", + "GoogleDocstring._parse_keyword_arguments_section", + "GoogleDocstring._parse_methods_section", + "GoogleDocstring._parse_notes_section", + "GoogleDocstring._parse_other_parameters_section", + "GoogleDocstring._parse_parameters_section", + "GoogleDocstring._parse_raises_section", + "GoogleDocstring._parse_receives_section", + "GoogleDocstring._parse_references_section", + "GoogleDocstring._parse_returns_section", + "GoogleDocstring._parse_see_also_section", + "GoogleDocstring._parse_warns_section", + "GoogleDocstring._parse_yields_section", + "GoogleDocstring._partition_field_on_colon", + "GoogleDocstring._qualify_name", + "GoogleDocstring._strip_empty", + "GoogleDocstring._lookup_annotation", + "_recombine_set_tokens", + "_tokenize_type_spec", + "_token_type", + "_convert_numpy_type_spec", + "NumpyDocstring.__init__", + "NumpyDocstring._get_location", + "NumpyDocstring._escape_args_and_kwargs", + "NumpyDocstring._consume_field", + "NumpyDocstring._consume_returns_section", + "NumpyDocstring._consume_section_header", + "NumpyDocstring._is_section_break", + "NumpyDocstring._is_section_header", + "NumpyDocstring._parse_see_also_section", + "NumpyDocstring._parse_numpydoc_see_also_section" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ], + "tests/test_ext_napoleon_docstring.py": [ + "NumpyDocstringTest.test_multiple_parameters" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" 
+ ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring", + "GoogleDocstring.__str__", + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + 
"sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring", + "GoogleDocstring.__str__", + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_notes_section", + "GoogleDocstring._parse_other_parameters_section", + "GoogleDocstring._parse_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_notes_section", + "GoogleDocstring._parse_other_parameters_section", + "GoogleDocstring._parse_parameters_section" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ], + "tests/test_ext_napoleon_docstring.py": [ + "GoogleDocstringTest.test_pep526_annotations" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + }, + "alternative_spans": { + "sphinx/ext/napoleon/docstring.py": [ + "GoogleDocstring._parse_other_parameters_section" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13925", + "repo": "django/django", + "base_commit": "0c42cdf0d2422f4c080e93594d5d15381d6e955e", + "problem_statement": "models.W042 is raised on inherited manually specified primary key.\nDescription\n\t\nI have models which inherit from other models, and they should inherit the primary key. This works fine with Django 3.1. However, if I install Django 3.2 alpha, when I run make_migrations I get the following error messages:\nSystem check identified some issues:\nWARNINGS:\naccounts.ReservedUsername: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the SpeedyCoreAccountsConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 
'django.db.models.BigAutoField'.\naccounts.User: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the SpeedyCoreAccountsConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\nblocks.Block: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\ncontact_by_form.Feedback: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the SpeedyCoreContactByFormConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\ncore_messages.ReadMark: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the SpeedyCoreMessagesConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\nfriendship.Block: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\nfriendship.Follow: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\nfriendship.Friend: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\nfriendship.FriendshipRequest: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\nlikes.UserLike: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\nuploads.Image: (models.W042) Auto-created primary key used when not defining a primary key type, by default 'django.db.models.AutoField'.\n\t\tHINT: Configure the DEFAULT_AUTO_FIELD setting or the AppConfig.default_auto_field attribute to point to a subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\nThese models should not use auto-created primary keys! I already defined the primary key in the ancestor of the model. For example class Entity which class User inherits from. 
It looks to me like a bug in Django 3.2 alpha.\n", + "golden_patch": "diff --git a/django/db/models/base.py b/django/db/models/base.py\n--- a/django/db/models/base.py\n+++ b/django/db/models/base.py\n@@ -1299,6 +1299,11 @@ def check(cls, **kwargs):\n def _check_default_pk(cls):\n if (\n cls._meta.pk.auto_created and\n+ # Inherited PKs are checked in parents models.\n+ not (\n+ isinstance(cls._meta.pk, OneToOneField) and\n+ cls._meta.pk.remote_field.parent_link\n+ ) and\n not settings.is_overridden('DEFAULT_AUTO_FIELD') and\n not cls._meta.app_config._is_default_auto_field_overridden\n ):\n", + "test_patch": "diff --git a/tests/check_framework/test_model_checks.py b/tests/check_framework/test_model_checks.py\n--- a/tests/check_framework/test_model_checks.py\n+++ b/tests/check_framework/test_model_checks.py\n@@ -376,23 +376,62 @@ def mocked_is_overridden(self, setting):\n @isolate_apps('check_framework.apps.CheckDefaultPKConfig', attr_name='apps')\n @override_system_checks([checks.model_checks.check_all_models])\n class ModelDefaultAutoFieldTests(SimpleTestCase):\n+ msg = (\n+ \"Auto-created primary key used when not defining a primary key type, \"\n+ \"by default 'django.db.models.AutoField'.\"\n+ )\n+ hint = (\n+ \"Configure the DEFAULT_AUTO_FIELD setting or the \"\n+ \"CheckDefaultPKConfig.default_auto_field attribute to point to a \"\n+ \"subclass of AutoField, e.g. 'django.db.models.BigAutoField'.\"\n+ )\n+\n def test_auto_created_pk(self):\n class Model(models.Model):\n pass\n \n self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [\n- Warning(\n- \"Auto-created primary key used when not defining a primary \"\n- \"key type, by default 'django.db.models.AutoField'.\",\n- hint=(\n- \"Configure the DEFAULT_AUTO_FIELD setting or the \"\n- \"CheckDefaultPKConfig.default_auto_field attribute to \"\n- \"point to a subclass of AutoField, e.g. 
\"\n- \"'django.db.models.BigAutoField'.\"\n- ),\n- obj=Model,\n- id='models.W042',\n- ),\n+ Warning(self.msg, hint=self.hint, obj=Model, id='models.W042'),\n+ ])\n+\n+ def test_explicit_inherited_pk(self):\n+ class Parent(models.Model):\n+ id = models.AutoField(primary_key=True)\n+\n+ class Child(Parent):\n+ pass\n+\n+ self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [])\n+\n+ def test_explicit_inherited_parent_link(self):\n+ class Parent(models.Model):\n+ id = models.AutoField(primary_key=True)\n+\n+ class Child(Parent):\n+ parent_ptr = models.OneToOneField(Parent, models.CASCADE, parent_link=True)\n+\n+ self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [])\n+\n+ def test_auto_created_inherited_pk(self):\n+ class Parent(models.Model):\n+ pass\n+\n+ class Child(Parent):\n+ pass\n+\n+ self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [\n+ Warning(self.msg, hint=self.hint, obj=Parent, id='models.W042'),\n+ ])\n+\n+ def test_auto_created_inherited_parent_link(self):\n+ class Parent(models.Model):\n+ pass\n+\n+ class Child(Parent):\n+ parent_ptr = models.OneToOneField(Parent, models.CASCADE, parent_link=True)\n+\n+ self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [\n+ Warning(self.msg, hint=self.hint, obj=Parent, id='models.W042'),\n ])\n \n @override_settings(DEFAULT_AUTO_FIELD='django.db.models.BigAutoField')\n", + "fail_to_pass": "[\"test_auto_created_inherited_pk (check_framework.test_model_checks.ModelDefaultAutoFieldTests)\", \"test_explicit_inherited_pk (check_framework.test_model_checks.ModelDefaultAutoFieldTests)\"]", + "pass_to_pass": "[\"test_app_default_auto_field (check_framework.test_model_checks.ModelDefaultAutoFieldTests)\", \"test_auto_created_inherited_parent_link (check_framework.test_model_checks.ModelDefaultAutoFieldTests)\", \"test_auto_created_pk (check_framework.test_model_checks.ModelDefaultAutoFieldTests)\", \"test_default_auto_field_setting (check_framework.test_model_checks.ModelDefaultAutoFieldTests)\", \"test_explicit_inherited_parent_link (check_framework.test_model_checks.ModelDefaultAutoFieldTests)\", \"test_explicit_pk (check_framework.test_model_checks.ModelDefaultAutoFieldTests)\", \"test_collision_abstract_model (check_framework.test_model_checks.IndexNameTests)\", \"test_collision_across_apps (check_framework.test_model_checks.IndexNameTests)\", \"test_collision_in_different_models (check_framework.test_model_checks.IndexNameTests)\", \"test_collision_in_same_model (check_framework.test_model_checks.IndexNameTests)\", \"test_no_collision_abstract_model_interpolation (check_framework.test_model_checks.IndexNameTests)\", \"test_no_collision_across_apps_interpolation (check_framework.test_model_checks.IndexNameTests)\", \"test_collision_abstract_model (check_framework.test_model_checks.ConstraintNameTests)\", \"test_collision_across_apps (check_framework.test_model_checks.ConstraintNameTests)\", \"test_collision_in_different_models (check_framework.test_model_checks.ConstraintNameTests)\", \"test_collision_in_same_model (check_framework.test_model_checks.ConstraintNameTests)\", \"test_no_collision_abstract_model_interpolation (check_framework.test_model_checks.ConstraintNameTests)\", \"test_no_collision_across_apps_interpolation (check_framework.test_model_checks.ConstraintNameTests)\", \"test_collision_across_apps (check_framework.test_model_checks.DuplicateDBTableTests)\", \"test_collision_across_apps_database_routers_installed 
(check_framework.test_model_checks.DuplicateDBTableTests)\", \"test_collision_in_same_app (check_framework.test_model_checks.DuplicateDBTableTests)\", \"test_collision_in_same_app_database_routers_installed (check_framework.test_model_checks.DuplicateDBTableTests)\", \"test_no_collision_for_proxy_models (check_framework.test_model_checks.DuplicateDBTableTests)\", \"test_no_collision_for_unmanaged_models (check_framework.test_model_checks.DuplicateDBTableTests)\"]", + "expected_spans": { + "django/db/models/base.py": [ + "Model._check_default_pk" + ] + }, + "test_file_spans": { + "tests/check_framework/test_model_checks.py": [ + "ModelDefaultAutoFieldTests.test_auto_created_pk" + ] + }, + "resolved_by": [ + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/base.py": [ + "Model._check_default_pk" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._check_default_pk" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/db/models/base.py": [ + "Model._check_default_pk" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._check_default_pk" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/base.py": [ + "Model._check_default_pk" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._check_default_pk" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/models/base.py": [ + "Model._check_default_pk" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._check_default_pk" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/core/checks/model_checks.py": [ + "check_all_models", + "_check_lazy_references" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sphinx-doc__sphinx-8721", + "repo": "sphinx-doc/sphinx", + "base_commit": "82ef497a8c88f0f6e50d84520e7276bfbf65025d", + "problem_statement": "viewcode creates pages for epub even if `viewcode_enable_epub=False` on `make html epub`\n**Describe the bug**\r\nviewcode creates pages for epub even if `viewcode_enable_epub=False` on `make html epub`\r\n\r\n**To Reproduce**\r\n```\r\n$ make html epub\r\n```\r\n\r\n**Expected behavior**\r\nmodule pages should not be created for epub by default.\r\n\r\n**Your project**\r\nNo\r\n\r\n**Screenshots**\r\nNo\r\n\r\n**Environment info**\r\n- OS: Mac\r\n- Python version: 3.9.1\r\n- Sphinx version: HEAD of 3.x\r\n- Sphinx extensions: sphinx.ext.viewcode\r\n- Extra tools: No\r\n\r\n**Additional context**\r\nNo\r\n\n", + "golden_patch": "diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py\n--- a/sphinx/ext/viewcode.py\n+++ b/sphinx/ext/viewcode.py\n@@ -182,6 +182,10 @@ def collect_pages(app: Sphinx) -> Generator[Tuple[str, Dict[str, Any], str], Non\n env = app.builder.env\n if not hasattr(env, '_viewcode_modules'):\n return\n+ if app.builder.name == \"singlehtml\":\n+ return\n+ if app.builder.name.startswith(\"epub\") and not env.config.viewcode_enable_epub:\n+ return\n highlighter = app.builder.highlighter # type: ignore\n urito = app.builder.get_relative_uri\n \n", + "test_patch": "diff --git a/tests/test_ext_viewcode.py b/tests/test_ext_viewcode.py\n--- a/tests/test_ext_viewcode.py\n+++ b/tests/test_ext_viewcode.py\n@@ -49,6 +49,21 @@ def test_viewcode(app, status, warning):\n ' """\\n') in result\n \n \n+@pytest.mark.sphinx('epub', 
testroot='ext-viewcode')\n+def test_viewcode_epub_default(app, status, warning):\n+ app.builder.build_all()\n+\n+ assert not (app.outdir / '_modules/spam/mod1.xhtml').exists()\n+\n+\n+@pytest.mark.sphinx('epub', testroot='ext-viewcode',\n+ confoverrides={'viewcode_enable_epub': True})\n+def test_viewcode_epub_enabled(app, status, warning):\n+ app.builder.build_all()\n+\n+ assert (app.outdir / '_modules/spam/mod1.xhtml').exists()\n+\n+\n @pytest.mark.sphinx(testroot='ext-viewcode', tags=['test_linkcode'])\n def test_linkcode(app, status, warning):\n app.builder.build(['objects'])\n", + "fail_to_pass": "[\"tests/test_ext_viewcode.py::test_viewcode_epub_default\"]", + "pass_to_pass": "[\"tests/test_ext_viewcode.py::test_viewcode_epub_enabled\", \"tests/test_ext_viewcode.py::test_linkcode\", \"tests/test_ext_viewcode.py::test_local_source_files\"]", + "expected_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "test_file_spans": { + "tests/test_ext_viewcode.py": [ + "test_linkcode" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "should_generate_module_page", + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "should_generate_module_page", + "collect_pages" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "imports", + "_get_full_modname", + "doctree_read", + "env_merge_info", + "missing_reference", + "get_module_filename", + "should_generate_module_page", + "collect_pages", + "setup" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "imports", + "_get_full_modname", + "doctree_read", + "env_merge_info", + "missing_reference", + "get_module_filename", + "should_generate_module_page", + "collect_pages", + "setup" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ], + "tests/test_ext_viewcode.py": [] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages", + "setup" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages", + "setup" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages", + "setup" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages", + "setup" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + 
"collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + }, + "alternative_spans": { + "sphinx/ext/viewcode.py": [ + "collect_pages" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13933", + "repo": "django/django", + "base_commit": "42e8cf47c7ee2db238bf91197ea398126c546741", + "problem_statement": "ModelChoiceField does not provide value of invalid choice when raising ValidationError\nDescription\n\t \n\t\t(last modified by Aaron Wiegel)\n\t \nCompared with ChoiceField and others, ModelChoiceField does not show the value of the invalid choice when raising a validation error. Passing in parameters with the invalid value and modifying the default error message for the code invalid_choice should fix this.\nFrom source code:\nclass ModelMultipleChoiceField(ModelChoiceField):\n\t\"\"\"A MultipleChoiceField whose choices are a model QuerySet.\"\"\"\n\twidget = SelectMultiple\n\thidden_widget = MultipleHiddenInput\n\tdefault_error_messages = {\n\t\t'invalid_list': _('Enter a list of values.'),\n\t\t'invalid_choice': _('Select a valid choice. %(value)s is not one of the'\n\t\t\t\t\t\t\t' available choices.'),\n\t\t'invalid_pk_value': _('\u201c%(pk)s\u201d is not a valid value.')\n\t}\n\t...\nclass ModelChoiceField(ChoiceField):\n\t\"\"\"A ChoiceField whose choices are a model QuerySet.\"\"\"\n\t# This class is a subclass of ChoiceField for purity, but it doesn't\n\t# actually use any of ChoiceField's implementation.\n\tdefault_error_messages = {\n\t\t'invalid_choice': _('Select a valid choice. 
That choice is not one of'\n\t\t\t\t\t\t\t' the available choices.'),\n\t}\n\t...\n", + "golden_patch": "diff --git a/django/forms/models.py b/django/forms/models.py\n--- a/django/forms/models.py\n+++ b/django/forms/models.py\n@@ -1284,7 +1284,11 @@ def to_python(self, value):\n value = getattr(value, key)\n value = self.queryset.get(**{key: value})\n except (ValueError, TypeError, self.queryset.model.DoesNotExist):\n- raise ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')\n+ raise ValidationError(\n+ self.error_messages['invalid_choice'],\n+ code='invalid_choice',\n+ params={'value': value},\n+ )\n return value\n \n def validate(self, value):\n", + "test_patch": "diff --git a/tests/forms_tests/tests/test_error_messages.py b/tests/forms_tests/tests/test_error_messages.py\n--- a/tests/forms_tests/tests/test_error_messages.py\n+++ b/tests/forms_tests/tests/test_error_messages.py\n@@ -308,3 +308,16 @@ def test_modelchoicefield(self):\n self.assertFormErrors(['REQUIRED'], f.clean, '')\n self.assertFormErrors(['NOT A LIST OF VALUES'], f.clean, '3')\n self.assertFormErrors(['4 IS INVALID CHOICE'], f.clean, ['4'])\n+\n+ def test_modelchoicefield_value_placeholder(self):\n+ f = ModelChoiceField(\n+ queryset=ChoiceModel.objects.all(),\n+ error_messages={\n+ 'invalid_choice': '\"%(value)s\" is not one of the available choices.',\n+ },\n+ )\n+ self.assertFormErrors(\n+ ['\"invalid\" is not one of the available choices.'],\n+ f.clean,\n+ 'invalid',\n+ )\n", + "fail_to_pass": "[\"test_modelchoicefield_value_placeholder (forms_tests.tests.test_error_messages.ModelChoiceFieldErrorMessagesTestCase)\"]", + "pass_to_pass": "[\"test_modelchoicefield (forms_tests.tests.test_error_messages.ModelChoiceFieldErrorMessagesTestCase)\", \"test_booleanfield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_charfield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_choicefield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_datefield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_datetimefield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_decimalfield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_emailfield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_error_messages_escaping (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_filefield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_floatfield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_generic_ipaddressfield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_integerfield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_multiplechoicefield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_regexfield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_splitdatetimefield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_subclassing_errorlist (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_timefield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\", \"test_urlfield (forms_tests.tests.test_error_messages.FormsErrorMessagesTestCase)\"]", + "expected_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + }, + "test_file_spans": { + 
"tests/forms_tests/tests/test_error_messages.py": [] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.prepare_value", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.prepare_value", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/forms/models.py": [ + "docstring", + "imports", + "construct_instance", + "model_to_dict", + "apply_limit_choices_to_to_formfield", + "fields_for_model", + "ModelFormOptions", + "ModelFormMetaclass.__new__", + "BaseModelForm.__init__", + "BaseModelForm._get_validation_exclusions", + "BaseModelForm._update_errors", + "BaseModelForm._post_clean", + "BaseModelForm._save_m2m", + "BaseModelForm.save", + "modelform_factory", + "BaseModelFormSet", + "BaseModelFormSet.__init__", + "BaseModelFormSet._construct_form", + "BaseModelFormSet.get_queryset", + "BaseModelFormSet.save", + "BaseModelFormSet:7", + "BaseModelFormSet.clean", + "BaseModelFormSet.validate_unique", + "BaseModelFormSet.get_unique_error_message", + "BaseModelFormSet.get_date_error_message", + "BaseModelFormSet.get_form_error", + "BaseModelFormSet.save_existing_objects", + "BaseModelFormSet.save_new_objects", + "BaseModelFormSet.add_fields", + "modelformset_factory", + "BaseInlineFormSet", + "BaseInlineFormSet.__init__", + "BaseInlineFormSet._construct_form", + "BaseInlineFormSet.get_default_prefix", + "BaseInlineFormSet.save_new", + "BaseInlineFormSet.add_fields", + "BaseInlineFormSet.get_unique_error_message", + "_get_foreign_key", + 
"inlineformset_factory", + "InlineForeignKeyField", + "InlineForeignKeyField.__init__", + "InlineForeignKeyField.clean", + "ModelChoiceIteratorValue.__init__", + "ModelChoiceIterator.__init__", + "ModelChoiceIterator.__iter__", + "ModelChoiceIterator.__len__", + "ModelChoiceIterator.__bool__", + "ModelChoiceIterator.choice", + "ModelChoiceField", + "ModelChoiceField.__init__", + "ModelChoiceField.__deepcopy__", + "ModelChoiceField:7", + "ModelChoiceField._get_choices", + "ModelChoiceField.to_python", + "ModelMultipleChoiceField", + "ModelMultipleChoiceField.clean", + "ModelMultipleChoiceField._check_values", + "ModelMultipleChoiceField.prepare_value", + "modelform_defines_fields" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "docstring", + "imports", + "construct_instance", + "model_to_dict", + "apply_limit_choices_to_to_formfield", + "fields_for_model", + "ModelFormOptions", + "ModelFormMetaclass.__new__", + "BaseModelForm.__init__", + "BaseModelForm._get_validation_exclusions", + "BaseModelForm._update_errors", + "BaseModelForm._post_clean", + "BaseModelForm._save_m2m", + "BaseModelForm.save", + "modelform_factory", + "BaseModelFormSet", + "BaseModelFormSet.__init__", + "BaseModelFormSet._construct_form", + "BaseModelFormSet.get_queryset", + "BaseModelFormSet.save", + "BaseModelFormSet:7", + "BaseModelFormSet.clean", + "BaseModelFormSet.validate_unique", + "BaseModelFormSet.get_unique_error_message", + "BaseModelFormSet.get_date_error_message", + "BaseModelFormSet.get_form_error", + "BaseModelFormSet.save_existing_objects", + "BaseModelFormSet.save_new_objects", + "BaseModelFormSet.add_fields", + "modelformset_factory", + "BaseInlineFormSet", + "BaseInlineFormSet.__init__", + "BaseInlineFormSet._construct_form", + "BaseInlineFormSet.get_default_prefix", + "BaseInlineFormSet.save_new", + "BaseInlineFormSet.add_fields", + "BaseInlineFormSet.get_unique_error_message", + "_get_foreign_key", + "inlineformset_factory", + "InlineForeignKeyField", + "InlineForeignKeyField.__init__", + "InlineForeignKeyField.clean", + "ModelChoiceIteratorValue.__init__", + "ModelChoiceIterator.__init__", + "ModelChoiceIterator.__iter__", + "ModelChoiceIterator.__len__", + "ModelChoiceIterator.__bool__", + "ModelChoiceIterator.choice", + "ModelChoiceField", + "ModelChoiceField.__init__", + "ModelChoiceField.__deepcopy__", + "ModelChoiceField:7", + "ModelChoiceField._get_choices", + "ModelChoiceField.to_python", + "ModelMultipleChoiceField", + "ModelMultipleChoiceField.clean", + "ModelMultipleChoiceField._check_values", + "ModelMultipleChoiceField.prepare_value", + "modelform_defines_fields" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python", + "ModelChoiceField.validate" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python", + "ModelChoiceField.validate" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + 
"ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "InlineForeignKeyField.clean", + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "InlineForeignKeyField.clean", + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python", + "ModelChoiceField.validate" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python", + "ModelChoiceField.validate" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + 
] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + }, + "alternative_spans": { + "django/forms/models.py": [ + "ModelChoiceField", + "ModelChoiceField.to_python" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sphinx-doc__sphinx-8801", + "repo": "sphinx-doc/sphinx", + "base_commit": "7ca279e33aebb60168d35e6be4ed059f4a68f2c1", + "problem_statement": "autodoc: The annotation only member in superclass is treated as \"undocumented\"\n**Describe the bug**\r\nautodoc: The annotation only member in superclass is treated as \"undocumented\".\r\n\r\n**To Reproduce**\r\n\r\n```\r\n# example.py\r\nclass Foo:\r\n \"\"\"docstring\"\"\"\r\n attr1: int #: docstring\r\n\r\n\r\nclass Bar(Foo):\r\n \"\"\"docstring\"\"\"\r\n attr2: str #: docstring\r\n```\r\n```\r\n# index.rst\r\n.. autoclass:: example.Bar\r\n :members:\r\n :inherited-members:\r\n```\r\n\r\n`Bar.attr1` is not documented. It will be shown if I give `:undoc-members:` option to the autoclass directive call. It seems the attribute is treated as undocumented.\r\n\r\n**Expected behavior**\r\nIt should be shown.\r\n\r\n**Your project**\r\nNo\r\n\r\n**Screenshots**\r\nNo\r\n\r\n**Environment info**\r\n- OS: Mac\r\n- Python version: 3.9.1\r\n- Sphinx version: HEAD of 3.x\r\n- Sphinx extensions: sphinx.ext.autodoc\r\n- Extra tools: No\r\n\r\n**Additional context**\r\nNo\r\n\n", + "golden_patch": "diff --git a/sphinx/ext/autodoc/importer.py b/sphinx/ext/autodoc/importer.py\n--- a/sphinx/ext/autodoc/importer.py\n+++ b/sphinx/ext/autodoc/importer.py\n@@ -294,24 +294,35 @@ def get_class_members(subject: Any, objpath: List[str], attrgetter: Callable\n \n try:\n for cls in getmro(subject):\n+ try:\n+ modname = safe_getattr(cls, '__module__')\n+ qualname = safe_getattr(cls, '__qualname__')\n+ analyzer = ModuleAnalyzer.for_module(modname)\n+ analyzer.analyze()\n+ except AttributeError:\n+ qualname = None\n+ analyzer = None\n+ except PycodeError:\n+ analyzer = None\n+\n # annotation only member (ex. attr: int)\n for name in getannotations(cls):\n name = unmangle(cls, name)\n if name and name not in members:\n- members[name] = ObjectMember(name, INSTANCEATTR, class_=cls)\n+ if analyzer and (qualname, name) in analyzer.attr_docs:\n+ docstring = '\\n'.join(analyzer.attr_docs[qualname, name])\n+ else:\n+ docstring = None\n+\n+ members[name] = ObjectMember(name, INSTANCEATTR, class_=cls,\n+ docstring=docstring)\n \n # append instance attributes (cf. 
self.attr1) if analyzer knows\n- try:\n- modname = safe_getattr(cls, '__module__')\n- qualname = safe_getattr(cls, '__qualname__')\n- analyzer = ModuleAnalyzer.for_module(modname)\n- analyzer.analyze()\n+ if analyzer:\n for (ns, name), docstring in analyzer.attr_docs.items():\n if ns == qualname and name not in members:\n members[name] = ObjectMember(name, INSTANCEATTR, class_=cls,\n docstring='\\n'.join(docstring))\n- except (AttributeError, PycodeError):\n- pass\n except AttributeError:\n pass\n \n", + "test_patch": "diff --git a/tests/roots/test-ext-autodoc/target/uninitialized_attributes.py b/tests/roots/test-ext-autodoc/target/uninitialized_attributes.py\nnew file mode 100644\n--- /dev/null\n+++ b/tests/roots/test-ext-autodoc/target/uninitialized_attributes.py\n@@ -0,0 +1,8 @@\n+class Base:\n+ attr1: int #: docstring\n+ attr2: str\n+\n+\n+class Derived(Base):\n+ attr3: int #: docstring\n+ attr4: str\ndiff --git a/tests/test_ext_autodoc_autoclass.py b/tests/test_ext_autodoc_autoclass.py\n--- a/tests/test_ext_autodoc_autoclass.py\n+++ b/tests/test_ext_autodoc_autoclass.py\n@@ -106,6 +106,73 @@ def test_inherited_instance_variable(app):\n ]\n \n \n+@pytest.mark.skipif(sys.version_info < (3, 6), reason='py36+ is available since python3.6.')\n+@pytest.mark.sphinx('html', testroot='ext-autodoc')\n+def test_uninitialized_attributes(app):\n+ options = {\"members\": None,\n+ \"inherited-members\": True}\n+ actual = do_autodoc(app, 'class', 'target.uninitialized_attributes.Derived', options)\n+ assert list(actual) == [\n+ '',\n+ '.. py:class:: Derived()',\n+ ' :module: target.uninitialized_attributes',\n+ '',\n+ '',\n+ ' .. py:attribute:: Derived.attr1',\n+ ' :module: target.uninitialized_attributes',\n+ ' :type: int',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ ' .. py:attribute:: Derived.attr3',\n+ ' :module: target.uninitialized_attributes',\n+ ' :type: int',\n+ '',\n+ ' docstring',\n+ '',\n+ ]\n+\n+\n+@pytest.mark.skipif(sys.version_info < (3, 6), reason='py36+ is available since python3.6.')\n+@pytest.mark.sphinx('html', testroot='ext-autodoc')\n+def test_undocumented_uninitialized_attributes(app):\n+ options = {\"members\": None,\n+ \"inherited-members\": True,\n+ \"undoc-members\": True}\n+ actual = do_autodoc(app, 'class', 'target.uninitialized_attributes.Derived', options)\n+ assert list(actual) == [\n+ '',\n+ '.. py:class:: Derived()',\n+ ' :module: target.uninitialized_attributes',\n+ '',\n+ '',\n+ ' .. py:attribute:: Derived.attr1',\n+ ' :module: target.uninitialized_attributes',\n+ ' :type: int',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ ' .. py:attribute:: Derived.attr2',\n+ ' :module: target.uninitialized_attributes',\n+ ' :type: str',\n+ '',\n+ '',\n+ ' .. py:attribute:: Derived.attr3',\n+ ' :module: target.uninitialized_attributes',\n+ ' :type: int',\n+ '',\n+ ' docstring',\n+ '',\n+ '',\n+ ' .. 
py:attribute:: Derived.attr4',\n+ ' :module: target.uninitialized_attributes',\n+ ' :type: str',\n+ '',\n+ ]\n+\n+\n def test_decorators(app):\n actual = do_autodoc(app, 'class', 'target.decorator.Baz')\n assert list(actual) == [\n", + "fail_to_pass": "[\"tests/test_ext_autodoc_autoclass.py::test_uninitialized_attributes\"]", + "pass_to_pass": "[\"tests/test_ext_autodoc_autoclass.py::test_classes\", \"tests/test_ext_autodoc_autoclass.py::test_instance_variable\", \"tests/test_ext_autodoc_autoclass.py::test_inherited_instance_variable\", \"tests/test_ext_autodoc_autoclass.py::test_undocumented_uninitialized_attributes\", \"tests/test_ext_autodoc_autoclass.py::test_decorators\", \"tests/test_ext_autodoc_autoclass.py::test_slots_attribute\", \"tests/test_ext_autodoc_autoclass.py::test_show_inheritance_for_subclass_of_generic_type\", \"tests/test_ext_autodoc_autoclass.py::test_class_alias\"]", + "expected_spans": { + "sphinx/ext/autodoc/importer.py": [ + "get_class_members" + ] + }, + "test_file_spans": { + "tests/test_ext_autodoc_autoclass.py": [ + "test_decorators" + ] + }, + "resolved_by": [ + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sphinx/ext/autodoc/importer.py": [ + "get_class_members" + ] + }, + "alternative_spans": { + "sphinx/ext/autodoc/importer.py": [ + "get_class_members" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-13964", + "repo": "django/django", + "base_commit": "f39634ff229887bf7790c069d0c411b38494ca38", + "problem_statement": "Saving parent object after setting on child leads to data loss for parents with non-numeric primary key.\nDescription\n\t \n\t\t(last modified by Charlie DeTar)\n\t \nGiven a model with a foreign key relation to another model that has a non-auto CharField as its primary key:\nclass Product(models.Model):\n\tsku = models.CharField(primary_key=True, max_length=50)\nclass Order(models.Model):\n\tproduct = models.ForeignKey(Product, on_delete=models.CASCADE)\nIf the relation is initialized on the parent with an empty instance that does not yet specify its primary key, and the primary key is subsequently defined, the parent does not \"see\" the primary key's change:\nwith transaction.atomic():\n\torder = Order()\n\torder.product = Product()\n\torder.product.sku = \"foo\"\n\torder.product.save()\n\torder.save()\n\tassert Order.objects.filter(product_id=\"\").exists() # Succeeds, but shouldn't\n\tassert Order.objects.filter(product=order.product).exists() # Fails\nInstead of product_id being populated with product.sku, it is set to emptystring. The foreign key constraint which would enforce the existence of a product with sku=\"\" is deferred until the transaction commits. 
The transaction does correctly fail on commit with a ForeignKeyViolation due to the non-existence of a product with emptystring as its primary key.\nOn the other hand, if the related unsaved instance is initialized with its primary key before assignment to the parent, it is persisted correctly:\nwith transaction.atomic():\n\torder = Order()\n\torder.product = Product(sku=\"foo\")\n\torder.product.save()\n\torder.save()\n\tassert Order.objects.filter(product=order.product).exists() # succeeds\nCommitting the transaction also succeeds.\nThis may have something to do with how the Order.product_id field is handled at assignment, together with something about handling fetching of auto vs non-auto primary keys from the related instance.\n", + "golden_patch": "diff --git a/django/db/models/base.py b/django/db/models/base.py\n--- a/django/db/models/base.py\n+++ b/django/db/models/base.py\n@@ -933,7 +933,7 @@ def _prepare_related_fields_for_save(self, operation_name):\n \"%s() prohibited to prevent data loss due to unsaved \"\n \"related object '%s'.\" % (operation_name, field.name)\n )\n- elif getattr(self, field.attname) is None:\n+ elif getattr(self, field.attname) in field.empty_values:\n # Use pk from related object if it has been saved after\n # an assignment.\n setattr(self, field.attname, obj.pk)\n", + "test_patch": "diff --git a/tests/many_to_one/models.py b/tests/many_to_one/models.py\n--- a/tests/many_to_one/models.py\n+++ b/tests/many_to_one/models.py\n@@ -68,6 +68,10 @@ class Parent(models.Model):\n bestchild = models.ForeignKey('Child', models.SET_NULL, null=True, related_name='favored_by')\n \n \n+class ParentStringPrimaryKey(models.Model):\n+ name = models.CharField(primary_key=True, max_length=15)\n+\n+\n class Child(models.Model):\n name = models.CharField(max_length=20)\n parent = models.ForeignKey(Parent, models.CASCADE)\n@@ -77,6 +81,10 @@ class ChildNullableParent(models.Model):\n parent = models.ForeignKey(Parent, models.CASCADE, null=True)\n \n \n+class ChildStringPrimaryKeyParent(models.Model):\n+ parent = models.ForeignKey(ParentStringPrimaryKey, on_delete=models.CASCADE)\n+\n+\n class ToFieldChild(models.Model):\n parent = models.ForeignKey(Parent, models.CASCADE, to_field='name', related_name='to_field_children')\n \ndiff --git a/tests/many_to_one/tests.py b/tests/many_to_one/tests.py\n--- a/tests/many_to_one/tests.py\n+++ b/tests/many_to_one/tests.py\n@@ -7,9 +7,9 @@\n from django.utils.translation import gettext_lazy\n \n from .models import (\n- Article, Category, Child, ChildNullableParent, City, Country, District,\n- First, Parent, Record, Relation, Reporter, School, Student, Third,\n- ToFieldChild,\n+ Article, Category, Child, ChildNullableParent, ChildStringPrimaryKeyParent,\n+ City, Country, District, First, Parent, ParentStringPrimaryKey, Record,\n+ Relation, Reporter, School, Student, Third, ToFieldChild,\n )\n \n \n@@ -549,6 +549,16 @@ def test_save_nullable_fk_after_parent_with_to_field(self):\n self.assertEqual(child.parent, parent)\n self.assertEqual(child.parent_id, parent.name)\n \n+ def test_save_fk_after_parent_with_non_numeric_pk_set_on_child(self):\n+ parent = ParentStringPrimaryKey()\n+ child = ChildStringPrimaryKeyParent(parent=parent)\n+ child.parent.name = 'jeff'\n+ parent.save()\n+ child.save()\n+ child.refresh_from_db()\n+ self.assertEqual(child.parent, parent)\n+ self.assertEqual(child.parent_id, parent.name)\n+\n def test_fk_to_bigautofield(self):\n ch = City.objects.create(name='Chicago')\n District.objects.create(city=ch, name='Far 
South')\n", + "fail_to_pass": "[\"test_save_fk_after_parent_with_non_numeric_pk_set_on_child (many_to_one.tests.ManyToOneTests)\"]", + "pass_to_pass": "[\"test_add (many_to_one.tests.ManyToOneTests)\", \"test_add_after_prefetch (many_to_one.tests.ManyToOneTests)\", \"test_add_remove_set_by_pk_raises (many_to_one.tests.ManyToOneTests)\", \"test_add_then_remove_after_prefetch (many_to_one.tests.ManyToOneTests)\", \"test_assign (many_to_one.tests.ManyToOneTests)\", \"test_assign_fk_id_none (many_to_one.tests.ManyToOneTests)\", \"test_assign_fk_id_value (many_to_one.tests.ManyToOneTests)\", \"test_cached_foreign_key_with_to_field_not_cleared_by_save (many_to_one.tests.ManyToOneTests)\", \"Model.save() invalidates stale ForeignKey relations after a primary key\", \"test_clear_after_prefetch (many_to_one.tests.ManyToOneTests)\", \"test_create (many_to_one.tests.ManyToOneTests)\", \"test_create_relation_with_gettext_lazy (many_to_one.tests.ManyToOneTests)\", \"test_deepcopy_and_circular_references (many_to_one.tests.ManyToOneTests)\", \"test_delete (many_to_one.tests.ManyToOneTests)\", \"test_explicit_fk (many_to_one.tests.ManyToOneTests)\", \"test_fk_assignment_and_related_object_cache (many_to_one.tests.ManyToOneTests)\", \"test_fk_instantiation_outside_model (many_to_one.tests.ManyToOneTests)\", \"test_fk_to_bigautofield (many_to_one.tests.ManyToOneTests)\", \"test_fk_to_smallautofield (many_to_one.tests.ManyToOneTests)\", \"test_get (many_to_one.tests.ManyToOneTests)\", \"test_hasattr_related_object (many_to_one.tests.ManyToOneTests)\", \"test_manager_class_caching (many_to_one.tests.ManyToOneTests)\", \"test_multiple_foreignkeys (many_to_one.tests.ManyToOneTests)\", \"test_related_object (many_to_one.tests.ManyToOneTests)\", \"test_relation_unsaved (many_to_one.tests.ManyToOneTests)\", \"test_remove_after_prefetch (many_to_one.tests.ManyToOneTests)\", \"test_reverse_assignment_deprecation (many_to_one.tests.ManyToOneTests)\", \"test_reverse_foreign_key_instance_to_field_caching (many_to_one.tests.ManyToOneTests)\", \"test_reverse_selects (many_to_one.tests.ManyToOneTests)\", \"test_save_nullable_fk_after_parent (many_to_one.tests.ManyToOneTests)\", \"test_save_nullable_fk_after_parent_with_to_field (many_to_one.tests.ManyToOneTests)\", \"test_select_related (many_to_one.tests.ManyToOneTests)\", \"test_selects (many_to_one.tests.ManyToOneTests)\", \"test_set (many_to_one.tests.ManyToOneTests)\", \"test_set_after_prefetch (many_to_one.tests.ManyToOneTests)\", \"test_values_list_exception (many_to_one.tests.ManyToOneTests)\"]", + "expected_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + }, + "test_file_spans": { + "tests/many_to_one/models.py": [ + "Child", + "ToFieldChild" + ], + "tests/many_to_one/tests.py": [ + "imports", + "ManyToOneTests.test_fk_to_bigautofield" + ] + }, + "resolved_by": [ + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ], + "django/db/models/fields/related.py": [ + "ForeignKey.to_python", + "ForeignKey.get_db_prep_save", + "ForeignKey.get_db_prep_value", + "ForeignKey.get_prep_value", + "ForeignKey" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/models/base.py": [ + "imports", + "Deferred.__repr__", + "subclass_exception", + "_has_contribute_to_class", + "ModelBase", + 
"ModelBase.__new__", + "ModelBase._prepare", + "ModelStateFieldsCacheDescriptor.__get__", + "ModelState", + "Model.__init__", + "Model.from_db", + "Model.__hash__", + "Model.__setstate__", + "Model:2", + "Model.get_deferred_fields", + "Model.refresh_from_db", + "Model.save", + "Model:4", + "Model.save_base", + "Model._save_parents", + "Model._save_table", + "Model._do_update", + "Model._do_insert", + "Model._prepare_related_fields_for_save", + "Model.delete", + "Model:8", + "Model._get_FIELD_display", + "Model._get_next_or_previous_by_FIELD", + "Model._get_next_or_previous_in_order", + "Model.prepare_database_save", + "Model.validate_unique", + "Model._get_unique_checks", + "Model._perform_unique_checks", + "Model._perform_date_checks", + "Model.date_error_message", + "Model.unique_error_message", + "Model.full_clean", + "Model.clean_fields", + "Model.check", + "Model._check_default_pk", + "Model._check_swappable", + "Model._check_model", + "Model._check_m2m_through_same_relationship", + "Model._check_id_field", + "Model._check_field_name_clashes", + "Model._check_column_name_clashes", + "Model._check_model_name_db_lookup_clashes", + "Model._check_property_name_related_field_accessor_clashes", + "Model._check_single_primary_key", + "Model._check_index_together", + "Model._check_unique_together", + "Model._check_indexes", + "Model._check_local_fields", + "Model._check_ordering", + "Model._check_long_column_names", + "Model._check_constraints", + "method_set_order", + "method_get_order", + "make_foreign_order_accessors", + "model_unpickle" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "imports", + "Deferred.__repr__", + "subclass_exception", + "_has_contribute_to_class", + "ModelBase", + "ModelBase.__new__", + "ModelBase._prepare", + "ModelStateFieldsCacheDescriptor.__get__", + "ModelState", + "Model.__init__", + "Model.from_db", + "Model.__hash__", + "Model.__setstate__", + "Model:2", + "Model.get_deferred_fields", + "Model.refresh_from_db", + "Model.save", + "Model:4", + "Model.save_base", + "Model._save_parents", + "Model._save_table", + "Model._do_update", + "Model._do_insert", + "Model._prepare_related_fields_for_save", + "Model.delete", + "Model:8", + "Model._get_FIELD_display", + "Model._get_next_or_previous_by_FIELD", + "Model._get_next_or_previous_in_order", + "Model.prepare_database_save", + "Model.validate_unique", + "Model._get_unique_checks", + "Model._perform_unique_checks", + "Model._perform_date_checks", + "Model.date_error_message", + "Model.unique_error_message", + "Model.full_clean", + "Model.clean_fields", + "Model.check", + "Model._check_default_pk", + "Model._check_swappable", + "Model._check_model", + "Model._check_m2m_through_same_relationship", + "Model._check_id_field", + "Model._check_field_name_clashes", + "Model._check_column_name_clashes", + "Model._check_model_name_db_lookup_clashes", + "Model._check_property_name_related_field_accessor_clashes", + "Model._check_single_primary_key", + "Model._check_index_together", + "Model._check_unique_together", + "Model._check_indexes", + "Model._check_local_fields", + "Model._check_ordering", + "Model._check_long_column_names", + "Model._check_constraints", + "method_set_order", + "method_get_order", + "make_foreign_order_accessors", + "model_unpickle" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/models/base.py": [ + "Model.save", + "Model._prepare_related_fields_for_save" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model.save", + 
"Model._prepare_related_fields_for_save" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save", + "Model.delete" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save", + "Model.delete" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + }, + "alternative_spans": { + "django/db/models/base.py": [ + "Model._prepare_related_fields_for_save" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-14016", + "repo": "django/django", + "base_commit": "1710cdbe79c90665046034fe1700933d038d90ad", + "problem_statement": "\"TypeError: cannot pickle\" when applying | operator to a Q object\nDescription\n\t \n\t\t(last modified by Daniel Izquierdo)\n\t \nUsing a reference to a non-pickleable type of object such as dict_keys in a Q object makes the | operator fail:\n>>> from django.db.models import Q\n>>> Q(x__in={}.keys())\n\n>>> Q() | Q(x__in={}.keys())\nTraceback (most recent call last):\n...\nTypeError: cannot pickle 'dict_keys' object\nEven though this particular example could be solved by doing Q() | Q(x__in={}) it still feels like using .keys() should work.\nI can work on a patch if there's agreement that this should not crash.\n", + "golden_patch": "diff --git a/django/db/models/query_utils.py b/django/db/models/query_utils.py\n--- a/django/db/models/query_utils.py\n+++ b/django/db/models/query_utils.py\n@@ -5,7 +5,6 @@\n large and/or so that they can be used by other modules without getting into\n circular import difficulties.\n \"\"\"\n-import copy\n import functools\n import inspect\n from collections import namedtuple\n@@ -46,10 +45,12 @@ def _combine(self, other, conn):\n \n # If the other Q() is empty, ignore it and just use `self`.\n if not other:\n- return copy.deepcopy(self)\n+ _, args, kwargs = self.deconstruct()\n+ return type(self)(*args, **kwargs)\n # Or if this Q is empty, ignore it and just use `other`.\n elif not self:\n- return copy.deepcopy(other)\n+ _, args, kwargs = other.deconstruct()\n+ return type(other)(*args, **kwargs)\n \n obj = type(self)()\n obj.connector = conn\n", + "test_patch": "diff --git a/tests/queries/test_q.py b/tests/queries/test_q.py\n--- a/tests/queries/test_q.py\n+++ b/tests/queries/test_q.py\n@@ -8,6 +8,10 @@ def test_combine_and_empty(self):\n self.assertEqual(q & Q(), q)\n self.assertEqual(Q() & q, q)\n \n+ q = Q(x__in={}.keys())\n+ self.assertEqual(q & Q(), q)\n+ self.assertEqual(Q() & q, q)\n+\n def test_combine_and_both_empty(self):\n self.assertEqual(Q() & Q(), Q())\n \n@@ -16,6 
+20,10 @@ def test_combine_or_empty(self):\n self.assertEqual(q | Q(), q)\n self.assertEqual(Q() | q, q)\n \n+ q = Q(x__in={}.keys())\n+ self.assertEqual(q | Q(), q)\n+ self.assertEqual(Q() | q, q)\n+\n def test_combine_or_both_empty(self):\n self.assertEqual(Q() | Q(), Q())\n \n", + "fail_to_pass": "[\"test_combine_and_empty (queries.test_q.QTests)\", \"test_combine_or_empty (queries.test_q.QTests)\"]", + "pass_to_pass": "[\"test_combine_and_both_empty (queries.test_q.QTests)\", \"test_combine_not_q_object (queries.test_q.QTests)\", \"test_combine_or_both_empty (queries.test_q.QTests)\", \"test_deconstruct (queries.test_q.QTests)\", \"test_deconstruct_and (queries.test_q.QTests)\", \"test_deconstruct_multiple_kwargs (queries.test_q.QTests)\", \"test_deconstruct_negated (queries.test_q.QTests)\", \"test_deconstruct_nested (queries.test_q.QTests)\", \"test_deconstruct_or (queries.test_q.QTests)\", \"test_reconstruct (queries.test_q.QTests)\", \"test_reconstruct_and (queries.test_q.QTests)\", \"test_reconstruct_negated (queries.test_q.QTests)\", \"test_reconstruct_or (queries.test_q.QTests)\"]", + "expected_spans": { + "django/db/models/query_utils.py": [ + "docstring", + "Q._combine" + ] + }, + "test_file_spans": { + "tests/queries/test_q.py": [ + "QTests.test_combine_and_both_empty", + "QTests.test_combine_or_both_empty" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q.__init__" + ], + "django/utils/tree.py": [ + "Node.__hash__" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q.__init__" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q.__or__" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q.__or__" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q", + "Q.__init__", + "Q._combine", + "Q.resolve_expression", + "Q.deconstruct", + "DeferredAttribute.__init__", + "DeferredAttribute.__get__", + "RegisterLookupMixin.get_lookups", + "select_related_descend", + "check_rel_lookup_compatibility", + "FilteredRelation.__eq__", + "FilteredRelation.resolve_expression", + "FilteredRelation.as_sql" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q", + "Q.__init__", + "Q._combine", + "Q.resolve_expression", + "Q.deconstruct", + "DeferredAttribute.__init__", + "DeferredAttribute.__get__", + "RegisterLookupMixin.get_lookups", + "select_related_descend", + "check_rel_lookup_compatibility", + "FilteredRelation.__eq__", + "FilteredRelation.resolve_expression", + "FilteredRelation.as_sql" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/query_utils.py": [ + "docstring", + "Q", + "Q.__init__", + "Q._combine", + "Q.resolve_expression" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "docstring", + "Q", + "Q.__init__", + 
"Q._combine", + "Q.resolve_expression" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q", + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q", + "Q._combine" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q", + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q", + "Q._combine" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q.__or__" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q.__or__" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q", + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q", + "Q._combine" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__", + "Q._combine", + "Q.resolve_expression" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__", + "Q._combine", + "Q.resolve_expression" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q._combine" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q.__init__", + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q.__init__", + "Q._combine" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q.__init__" + ] + } + }, + { + "run_name": "20240524_opencsg_starship_gpt4", + "spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + 
"django/db/models/query_utils.py": [ + "Q._combine", + "Q.__or__" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + }, + { + "run_name": "20240806_SuperCoder2.0", + "spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q", + "Q.__init__", + "Q._combine", + "Q.resolve_expression", + "Q.deconstruct", + "DeferredAttribute.__init__", + "DeferredAttribute.__get__", + "RegisterLookupMixin.get_lookups", + "select_related_descend", + "check_rel_lookup_compatibility", + "FilteredRelation.__eq__", + "FilteredRelation.resolve_expression", + "FilteredRelation.as_sql" + ] + } + }, + { + "run_name": "20240908_infant_gpt4o", + "spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + }, + { + "run_name": "20240829_Isoform", + "spans": { + "django/db/models/query_utils.py": [ + "Q", + "Q._combine" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "run_name": "20240622_Lingma_Agent", + "spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "run_name": "20241016_IBM-SWE-1.0", + "spans": { + "django/db/models/query_utils.py": [ + "Q", + "Q._combine" + ] + } + }, + { + "run_name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "run_name": "20240612_IBM_Research_Agent101", + "spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q.__or__" + ] + } + }, + { + "run_name": "20240621_autocoderover-v20240620", + "spans": { + "django/db/models/query_utils.py": [ + "Q", + "Q._combine" + ] + } + }, + { + "run_name": "20240617_factory_code_droid", + "spans": { + "django/db/models/query_utils.py": [ + "Q.__init__", + "Q._combine", + "Q.resolve_expression" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "django/db/models/query_utils.py": [ + "Q.__init__" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q._combine" + ] + } + }, + { + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "django/db/models/query_utils.py": [ + "imports", + "Q.__init__", + "Q._combine" + ] + } + }, + { + "run_name": "20240721_amazon-q-developer-agent-20240719-dev", + "spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + } + ] + }, + { + "instance_id": "django__django-14017", + "repo": "django/django", + "base_commit": "466920f6d726eee90d5566e0a9948e92b33a122e", + "problem_statement": "Q(...) & Exists(...) raises a TypeError\nDescription\n\t\nExists(...) & Q(...) works, but Q(...) & Exists(...) 
raise a TypeError\nHere's a minimal example:\nIn [3]: Exists(Product.objects.all()) & Q()\nOut[3]: , (AND: ))>\nIn [4]: Q() & Exists(Product.objects.all())\n---------------------------------------------------------------------------\nTypeError\t\t\t\t\t\t\t\t Traceback (most recent call last)\n in \n----> 1 Q() & Exists(Product.objects.all())\n~/Code/venv/ecom/lib/python3.8/site-packages/django/db/models/query_utils.py in __and__(self, other)\n\t 90 \n\t 91\t def __and__(self, other):\n---> 92\t\t return self._combine(other, self.AND)\n\t 93 \n\t 94\t def __invert__(self):\n~/Code/venv/ecom/lib/python3.8/site-packages/django/db/models/query_utils.py in _combine(self, other, conn)\n\t 71\t def _combine(self, other, conn):\n\t 72\t\t if not isinstance(other, Q):\n---> 73\t\t\t raise TypeError(other)\n\t 74 \n\t 75\t\t # If the other Q() is empty, ignore it and just use `self`.\nTypeError: \nThe & (and |) operators should be commutative on Q-Exists pairs, but it's not\nI think there's a missing definition of __rand__ somewhere.\n", + "golden_patch": "diff --git a/django/db/models/query_utils.py b/django/db/models/query_utils.py\n--- a/django/db/models/query_utils.py\n+++ b/django/db/models/query_utils.py\n@@ -40,7 +40,7 @@ def __init__(self, *args, _connector=None, _negated=False, **kwargs):\n super().__init__(children=[*args, *sorted(kwargs.items())], connector=_connector, negated=_negated)\n \n def _combine(self, other, conn):\n- if not isinstance(other, Q):\n+ if not(isinstance(other, Q) or getattr(other, 'conditional', False) is True):\n raise TypeError(other)\n \n # If the other Q() is empty, ignore it and just use `self`.\n", + "test_patch": "diff --git a/tests/expressions/tests.py b/tests/expressions/tests.py\n--- a/tests/expressions/tests.py\n+++ b/tests/expressions/tests.py\n@@ -815,6 +815,28 @@ def test_boolean_expression_combined(self):\n Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)),\n [self.example_inc.ceo, self.max],\n )\n+ self.assertCountEqual(\n+ Employee.objects.filter(Q(salary__gte=30) & Exists(is_ceo)),\n+ [self.max],\n+ )\n+ self.assertCountEqual(\n+ Employee.objects.filter(Q(salary__lt=15) | Exists(is_poc)),\n+ [self.example_inc.ceo, self.max],\n+ )\n+\n+ def test_boolean_expression_combined_with_empty_Q(self):\n+ is_poc = Company.objects.filter(point_of_contact=OuterRef('pk'))\n+ self.gmbh.point_of_contact = self.max\n+ self.gmbh.save()\n+ tests = [\n+ Exists(is_poc) & Q(),\n+ Q() & Exists(is_poc),\n+ Exists(is_poc) | Q(),\n+ Q() | Exists(is_poc),\n+ ]\n+ for conditions in tests:\n+ with self.subTest(conditions):\n+ self.assertCountEqual(Employee.objects.filter(conditions), [self.max])\n \n \n class IterableLookupInnerExpressionsTests(TestCase):\n", + "fail_to_pass": "[\"test_boolean_expression_combined (expressions.tests.BasicExpressionsTests)\", \"test_boolean_expression_combined_with_empty_Q (expressions.tests.BasicExpressionsTests)\"]", + "pass_to_pass": "[\"test_resolve_output_field (expressions.tests.CombinedExpressionTests)\", \"test_deconstruct (expressions.tests.FTests)\", \"test_deepcopy (expressions.tests.FTests)\", \"test_equal (expressions.tests.FTests)\", \"test_hash (expressions.tests.FTests)\", \"test_not_equal_Value (expressions.tests.FTests)\", \"test_and (expressions.tests.CombinableTests)\", \"test_negation (expressions.tests.CombinableTests)\", \"test_or (expressions.tests.CombinableTests)\", \"test_reversed_and (expressions.tests.CombinableTests)\", \"test_reversed_or (expressions.tests.CombinableTests)\", \"test_empty_group_by 
(expressions.tests.ExpressionWrapperTests)\", \"test_non_empty_group_by (expressions.tests.ExpressionWrapperTests)\", \"test_aggregates (expressions.tests.ReprTests)\", \"test_distinct_aggregates (expressions.tests.ReprTests)\", \"test_expressions (expressions.tests.ReprTests)\", \"test_filtered_aggregates (expressions.tests.ReprTests)\", \"test_functions (expressions.tests.ReprTests)\", \"test_optimizations (expressions.tests.ExistsTests)\", \"test_equal (expressions.tests.SimpleExpressionTests)\", \"test_hash (expressions.tests.SimpleExpressionTests)\", \"test_month_aggregation (expressions.tests.FieldTransformTests)\", \"test_multiple_transforms_in_values (expressions.tests.FieldTransformTests)\", \"test_transform_in_values (expressions.tests.FieldTransformTests)\", \"Complex expressions of different connection types are possible.\", \"We can fill a value in all objects with an other value of the\", \"We can filter for objects, where a value is not equals the value\", \"We can increment a value of all objects in a query set.\", \"test_compile_unresolved (expressions.tests.ValueTests)\", \"test_deconstruct (expressions.tests.ValueTests)\", \"test_deconstruct_output_field (expressions.tests.ValueTests)\", \"test_equal (expressions.tests.ValueTests)\", \"test_equal_output_field (expressions.tests.ValueTests)\", \"test_hash (expressions.tests.ValueTests)\", \"test_raise_empty_expressionlist (expressions.tests.ValueTests)\", \"test_resolve_output_field (expressions.tests.ValueTests)\", \"test_resolve_output_field_failure (expressions.tests.ValueTests)\", \"test_update_TimeField_using_Value (expressions.tests.ValueTests)\", \"test_update_UUIDField_using_Value (expressions.tests.ValueTests)\", \"test_F_reuse (expressions.tests.ExpressionsTests)\", \"Special characters (e.g. 
%, _ and \\\\) stored in database are\", \"This tests that SQL injection isn't possible using compilation of\", \"test_expressions_in_lookups_join_choice (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_in_lookup_allows_F_expressions_and_expressions_for_datetimes (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_in_lookup_allows_F_expressions_and_expressions_for_integers (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_range_lookup_allows_F_expressions_and_expressions_for_integers (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_range_lookup_namedtuple (expressions.tests.IterableLookupInnerExpressionsTests)\", \"test_lefthand_addition (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_and (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_left_shift_operator (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_or (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_right_shift_operator (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_xor (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_bitwise_xor_null (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_division (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_modulo (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_multiplication (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_power (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_subtraction (expressions.tests.ExpressionOperatorTests)\", \"test_lefthand_transformed_field_bitwise_or (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_addition (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_division (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_modulo (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_multiplication (expressions.tests.ExpressionOperatorTests)\", \"test_right_hand_subtraction (expressions.tests.ExpressionOperatorTests)\", \"test_righthand_power (expressions.tests.ExpressionOperatorTests)\", \"test_date_case_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_date_comparison (expressions.tests.FTimeDeltaTests)\", \"test_date_minus_duration (expressions.tests.FTimeDeltaTests)\", \"test_date_subquery_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_date_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_datetime_subquery_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_datetime_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_datetime_subtraction_microseconds (expressions.tests.FTimeDeltaTests)\", \"test_delta_add (expressions.tests.FTimeDeltaTests)\", \"test_delta_subtract (expressions.tests.FTimeDeltaTests)\", \"test_delta_update (expressions.tests.FTimeDeltaTests)\", \"test_duration_expressions (expressions.tests.FTimeDeltaTests)\", \"test_duration_with_datetime (expressions.tests.FTimeDeltaTests)\", \"test_duration_with_datetime_microseconds (expressions.tests.FTimeDeltaTests)\", \"test_durationfield_add (expressions.tests.FTimeDeltaTests)\", \"test_exclude (expressions.tests.FTimeDeltaTests)\", \"test_invalid_operator (expressions.tests.FTimeDeltaTests)\", \"test_mixed_comparisons2 (expressions.tests.FTimeDeltaTests)\", \"test_multiple_query_compilation (expressions.tests.FTimeDeltaTests)\", \"test_negative_timedelta_update (expressions.tests.FTimeDeltaTests)\", \"test_query_clone (expressions.tests.FTimeDeltaTests)\", 
\"test_time_subquery_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_time_subtraction (expressions.tests.FTimeDeltaTests)\", \"test_aggregate_rawsql_annotation (expressions.tests.BasicExpressionsTests)\", \"test_aggregate_subquery_annotation (expressions.tests.BasicExpressionsTests)\", \"test_annotate_values_aggregate (expressions.tests.BasicExpressionsTests)\", \"test_annotate_values_count (expressions.tests.BasicExpressionsTests)\", \"test_annotate_values_filter (expressions.tests.BasicExpressionsTests)\", \"test_annotation_with_nested_outerref (expressions.tests.BasicExpressionsTests)\", \"test_annotation_with_outerref (expressions.tests.BasicExpressionsTests)\", \"test_annotations_within_subquery (expressions.tests.BasicExpressionsTests)\", \"test_arithmetic (expressions.tests.BasicExpressionsTests)\", \"test_case_in_filter_if_boolean_output_field (expressions.tests.BasicExpressionsTests)\", \"test_exist_single_field_output_field (expressions.tests.BasicExpressionsTests)\", \"test_exists_in_filter (expressions.tests.BasicExpressionsTests)\", \"test_explicit_output_field (expressions.tests.BasicExpressionsTests)\", \"test_filter_inter_attribute (expressions.tests.BasicExpressionsTests)\", \"test_filter_with_join (expressions.tests.BasicExpressionsTests)\", \"test_filtering_on_annotate_that_uses_q (expressions.tests.BasicExpressionsTests)\", \"test_filtering_on_q_that_is_boolean (expressions.tests.BasicExpressionsTests)\", \"test_filtering_on_rawsql_that_is_boolean (expressions.tests.BasicExpressionsTests)\", \"test_in_subquery (expressions.tests.BasicExpressionsTests)\", \"test_incorrect_field_in_F_expression (expressions.tests.BasicExpressionsTests)\", \"test_incorrect_joined_field_in_F_expression (expressions.tests.BasicExpressionsTests)\", \"test_nested_outerref_with_function (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery_join_outer_ref (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery_outer_ref_2 (expressions.tests.BasicExpressionsTests)\", \"test_nested_subquery_outer_ref_with_autofield (expressions.tests.BasicExpressionsTests)\", \"test_new_object_create (expressions.tests.BasicExpressionsTests)\", \"test_new_object_save (expressions.tests.BasicExpressionsTests)\", \"test_object_create_with_aggregate (expressions.tests.BasicExpressionsTests)\", \"test_object_update (expressions.tests.BasicExpressionsTests)\", \"test_object_update_fk (expressions.tests.BasicExpressionsTests)\", \"test_object_update_unsaved_objects (expressions.tests.BasicExpressionsTests)\", \"test_order_by_exists (expressions.tests.BasicExpressionsTests)\", \"test_order_by_multiline_sql (expressions.tests.BasicExpressionsTests)\", \"test_order_of_operations (expressions.tests.BasicExpressionsTests)\", \"test_outerref (expressions.tests.BasicExpressionsTests)\", \"test_outerref_mixed_case_table_name (expressions.tests.BasicExpressionsTests)\", \"test_outerref_with_operator (expressions.tests.BasicExpressionsTests)\", \"test_parenthesis_priority (expressions.tests.BasicExpressionsTests)\", \"test_pickle_expression (expressions.tests.BasicExpressionsTests)\", \"test_subquery (expressions.tests.BasicExpressionsTests)\", \"test_subquery_eq (expressions.tests.BasicExpressionsTests)\", \"test_subquery_filter_by_aggregate (expressions.tests.BasicExpressionsTests)\", \"test_subquery_filter_by_lazy (expressions.tests.BasicExpressionsTests)\", \"test_subquery_group_by_outerref_in_filter 
(expressions.tests.BasicExpressionsTests)\", \"test_subquery_in_filter (expressions.tests.BasicExpressionsTests)\", \"test_subquery_references_joined_table_twice (expressions.tests.BasicExpressionsTests)\", \"test_ticket_11722_iexact_lookup (expressions.tests.BasicExpressionsTests)\", \"test_ticket_16731_startswith_lookup (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_chained_filters (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_join_reuse (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_kwarg_ordering (expressions.tests.BasicExpressionsTests)\", \"test_ticket_18375_kwarg_ordering_2 (expressions.tests.BasicExpressionsTests)\", \"test_update (expressions.tests.BasicExpressionsTests)\", \"test_update_inherited_field_value (expressions.tests.BasicExpressionsTests)\", \"test_update_with_fk (expressions.tests.BasicExpressionsTests)\", \"test_update_with_none (expressions.tests.BasicExpressionsTests)\", \"test_uuid_pk_subquery (expressions.tests.BasicExpressionsTests)\"]", + "expected_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + }, + "test_file_spans": { + "tests/expressions/tests.py": [ + "BasicExpressionsTests" + ] + }, + "resolved_by": [ + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/expressions.py": [ + "Combinable.__and__", + "BaseExpression.resolve_expression", + "Exists" + ], + "django/db/models/query_utils.py": [ + "Q._combine", + "Q" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/expressions.py": [ + "Exists.as_sql" + ], + "django/db/models/query_utils.py": [ + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/expressions.py": [ + "Exists.as_sql" + ], + "django/db/models/query_utils.py": [ + "Q._combine" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q.__and__" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q.__and__" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q.deconstruct" + ] + }, + "alternative_spans": { + "django/db/models/query_utils.py": [ + "Q._combine", + "Q.deconstruct" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pytest-dev__pytest-8365", + "repo": "pytest-dev/pytest", + "base_commit": "4964b468c83c06971eb743fbc57cc404f760c573", + "problem_statement": 
"tmpdir creation fails when the username contains illegal characters for directory names\n`tmpdir`, `tmpdir_factory` and `tmp_path_factory` rely on `getpass.getuser()` for determining the `basetemp` directory. I found that the user name returned by `getpass.getuser()` may return characters that are not allowed for directory names. This may lead to errors while creating the temporary directory.\r\n\r\nThe situation in which I reproduced this issue was while being logged in through an ssh connection into my Windows 10 x64 Enterprise version (1909) using an OpenSSH_for_Windows_7.7p1 server. In this configuration the command `python -c \"import getpass; print(getpass.getuser())\"` returns my domain username e.g. `contoso\\john_doe` instead of `john_doe` as when logged in regularly using a local session.\r\n\r\nWhen trying to create a temp directory in pytest through e.g. `tmpdir_factory.mktemp('foobar')` this fails with the following error message:\r\n```\r\nself = WindowsPath('C:/Users/john_doe/AppData/Local/Temp/pytest-of-contoso/john_doe')\r\nmode = 511, parents = False, exist_ok = True\r\n\r\n def mkdir(self, mode=0o777, parents=False, exist_ok=False):\r\n \"\"\"\r\n Create a new directory at this given path.\r\n \"\"\"\r\n if self._closed:\r\n self._raise_closed()\r\n try:\r\n> self._accessor.mkdir(self, mode)\r\nE FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\john_doe\\\\AppData\\\\Local\\\\Temp\\\\pytest-of-contoso\\\\john_doe'\r\n\r\nC:\\Python38\\lib\\pathlib.py:1266: FileNotFoundError\r\n```\r\n\r\nI could also reproduce this without the complicated ssh/windows setup with pytest 6.2.2 using the following commands from a `cmd`:\r\n```bat\r\necho def test_tmpdir(tmpdir):>test_tmp.py\r\necho pass>>test_tmp.py\r\nset LOGNAME=contoso\\john_doe\r\npy.test test_tmp.py\r\n```\r\n\r\nThanks for having a look at this!\n", + "golden_patch": "diff --git a/src/_pytest/tmpdir.py b/src/_pytest/tmpdir.py\n--- a/src/_pytest/tmpdir.py\n+++ b/src/_pytest/tmpdir.py\n@@ -115,7 +115,12 @@ def getbasetemp(self) -> Path:\n # use a sub-directory in the temproot to speed-up\n # make_numbered_dir() call\n rootdir = temproot.joinpath(f\"pytest-of-{user}\")\n- rootdir.mkdir(exist_ok=True)\n+ try:\n+ rootdir.mkdir(exist_ok=True)\n+ except OSError:\n+ # getuser() likely returned illegal characters for the platform, use unknown back off mechanism\n+ rootdir = temproot.joinpath(\"pytest-of-unknown\")\n+ rootdir.mkdir(exist_ok=True)\n basetemp = make_numbered_dir_with_cleanup(\n prefix=\"pytest-\", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT\n )\n", + "test_patch": "diff --git a/testing/test_tmpdir.py b/testing/test_tmpdir.py\n--- a/testing/test_tmpdir.py\n+++ b/testing/test_tmpdir.py\n@@ -11,6 +11,7 @@\n import pytest\n from _pytest import pathlib\n from _pytest.config import Config\n+from _pytest.monkeypatch import MonkeyPatch\n from _pytest.pathlib import cleanup_numbered_dir\n from _pytest.pathlib import create_cleanup_lock\n from _pytest.pathlib import make_numbered_dir\n@@ -445,3 +446,14 @@ def test(tmp_path):\n # running a second time and ensure we don't crash\n result = pytester.runpytest(\"--basetemp=tmp\")\n assert result.ret == 0\n+\n+\n+def test_tmp_path_factory_handles_invalid_dir_characters(\n+ tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch\n+) -> None:\n+ monkeypatch.setattr(\"getpass.getuser\", lambda: \"os/<:*?;>agnostic\")\n+ # _basetemp / _given_basetemp are cached / set in parallel runs, patch them\n+ 
monkeypatch.setattr(tmp_path_factory, \"_basetemp\", None)\n+ monkeypatch.setattr(tmp_path_factory, \"_given_basetemp\", None)\n+ p = tmp_path_factory.getbasetemp()\n+ assert \"pytest-of-unknown\" in str(p)\n", + "fail_to_pass": "[\"testing/test_tmpdir.py::test_tmp_path_factory_handles_invalid_dir_characters\"]", + "pass_to_pass": "[\"testing/test_tmpdir.py::TestTempdirHandler::test_mktemp\", \"testing/test_tmpdir.py::TestTempdirHandler::test_tmppath_relative_basetemp_absolute\", \"testing/test_tmpdir.py::test_get_user_uid_not_found\", \"testing/test_tmpdir.py::TestNumberedDir::test_make\", \"testing/test_tmpdir.py::TestNumberedDir::test_cleanup_lock_create\", \"testing/test_tmpdir.py::TestNumberedDir::test_lock_register_cleanup_removal\", \"testing/test_tmpdir.py::TestNumberedDir::test_cleanup_keep\", \"testing/test_tmpdir.py::TestNumberedDir::test_cleanup_locked\", \"testing/test_tmpdir.py::TestNumberedDir::test_cleanup_ignores_symlink\", \"testing/test_tmpdir.py::TestNumberedDir::test_removal_accepts_lock\", \"testing/test_tmpdir.py::TestRmRf::test_rm_rf\", \"testing/test_tmpdir.py::TestRmRf::test_rm_rf_with_read_only_file\", \"testing/test_tmpdir.py::TestRmRf::test_rm_rf_with_read_only_directory\", \"testing/test_tmpdir.py::TestRmRf::test_on_rm_rf_error\", \"testing/test_tmpdir.py::test_tmpdir_equals_tmp_path\", \"testing/test_tmpdir.py::test_tmpdir_fixture\", \"testing/test_tmpdir.py::TestConfigTmpdir::test_getbasetemp_custom_removes_old\", \"testing/test_tmpdir.py::test_mktemp[mypath-True]\", \"testing/test_tmpdir.py::test_mktemp[/mypath1-False]\", \"testing/test_tmpdir.py::test_mktemp[./mypath1-True]\", \"testing/test_tmpdir.py::test_mktemp[../mypath3-False]\", \"testing/test_tmpdir.py::test_mktemp[../../mypath4-False]\", \"testing/test_tmpdir.py::test_mktemp[mypath5/..-False]\", \"testing/test_tmpdir.py::test_mktemp[mypath6/../mypath6-True]\", \"testing/test_tmpdir.py::test_mktemp[mypath7/../mypath7/..-False]\", \"testing/test_tmpdir.py::test_tmpdir_always_is_realpath\", \"testing/test_tmpdir.py::test_tmp_path_always_is_realpath\", \"testing/test_tmpdir.py::test_tmpdir_too_long_on_parametrization\", \"testing/test_tmpdir.py::test_tmpdir_factory\", \"testing/test_tmpdir.py::test_tmpdir_fallback_tox_env\", \"testing/test_tmpdir.py::test_tmpdir_fallback_uid_not_found\", \"testing/test_tmpdir.py::test_basetemp_with_read_only_files\"]", + "expected_spans": { + "src/_pytest/tmpdir.py": [ + "TempPathFactory.getbasetemp" + ] + }, + "test_file_spans": { + "testing/test_tmpdir.py": [ + "imports" + ] + }, + "resolved_by": [ + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "src/_pytest/tmpdir.py": [ + "get_user" + ], + "testing/test_tmpdir.py": [ + "TestNumberedDir" + ] + }, + "alternative_spans": { + "src/_pytest/tmpdir.py": [ + "get_user" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "src/_pytest/tmpdir.py": [ + "get_user" + ], + "testing/test_tmpdir.py": [ + "TestNumberedDir" + ] + }, + "alternative_spans": { + "src/_pytest/tmpdir.py": [ + "get_user" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240925_hyperagent_lite1", + "spans": { + "src/_pytest/tmpdir.py": [ + "get_user" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "src/_pytest/tmpdir.py": [ + "get_user" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-21055", + "repo": "sympy/sympy", + "base_commit": "748ce73479ee2cd5c861431091001cc18943c735", + "problem_statement": "`refine()` does not understand how to simplify 
complex arguments\nJust learned about the refine-function, which would come in handy frequently for me. But\r\n`refine()` does not recognize that argument functions simplify for real numbers.\r\n\r\n```\r\n>>> from sympy import * \r\n>>> var('a,x') \r\n>>> J = Integral(sin(x)*exp(-a*x),(x,0,oo)) \r\n>>> J.doit()\r\n\tPiecewise((1/(a**2 + 1), 2*Abs(arg(a)) < pi), (Integral(exp(-a*x)*sin(x), (x, 0, oo)), True))\r\n>>> refine(J.doit(),Q.positive(a)) \r\n Piecewise((1/(a**2 + 1), 2*Abs(arg(a)) < pi), (Integral(exp(-a*x)*sin(x), (x, 0, oo)), True))\r\n>>> refine(abs(a),Q.positive(a)) \r\n\ta\r\n>>> refine(arg(a),Q.positive(a)) \r\n\targ(a)\r\n```\r\nI cann't find any open issues identifying this. Easy to fix, though.\r\n\r\n\n", + "golden_patch": "diff --git a/sympy/assumptions/refine.py b/sympy/assumptions/refine.py\n--- a/sympy/assumptions/refine.py\n+++ b/sympy/assumptions/refine.py\n@@ -297,6 +297,28 @@ def refine_im(expr, assumptions):\n return - S.ImaginaryUnit * arg\n return _refine_reim(expr, assumptions)\n \n+def refine_arg(expr, assumptions):\n+ \"\"\"\n+ Handler for complex argument\n+\n+ Explanation\n+ ===========\n+\n+ >>> from sympy.assumptions.refine import refine_arg\n+ >>> from sympy import Q, arg\n+ >>> from sympy.abc import x\n+ >>> refine_arg(arg(x), Q.positive(x))\n+ 0\n+ >>> refine_arg(arg(x), Q.negative(x))\n+ pi\n+ \"\"\"\n+ rg = expr.args[0]\n+ if ask(Q.positive(rg), assumptions):\n+ return S.Zero\n+ if ask(Q.negative(rg), assumptions):\n+ return S.Pi\n+ return None\n+\n \n def _refine_reim(expr, assumptions):\n # Helper function for refine_re & refine_im\n@@ -379,6 +401,7 @@ def refine_matrixelement(expr, assumptions):\n 'atan2': refine_atan2,\n 're': refine_re,\n 'im': refine_im,\n+ 'arg': refine_arg,\n 'sign': refine_sign,\n 'MatrixElement': refine_matrixelement\n } # type: Dict[str, Callable[[Expr, Boolean], Expr]]\n", + "test_patch": "diff --git a/sympy/assumptions/tests/test_refine.py b/sympy/assumptions/tests/test_refine.py\n--- a/sympy/assumptions/tests/test_refine.py\n+++ b/sympy/assumptions/tests/test_refine.py\n@@ -1,5 +1,5 @@\n from sympy import (Abs, exp, Expr, I, pi, Q, Rational, refine, S, sqrt,\n- atan, atan2, nan, Symbol, re, im, sign)\n+ atan, atan2, nan, Symbol, re, im, sign, arg)\n from sympy.abc import w, x, y, z\n from sympy.core.relational import Eq, Ne\n from sympy.functions.elementary.piecewise import Piecewise\n@@ -160,6 +160,10 @@ def test_sign():\n x = Symbol('x', complex=True)\n assert refine(sign(x), Q.zero(x)) == 0\n \n+def test_arg():\n+ x = Symbol('x', complex = True)\n+ assert refine(arg(x), Q.positive(x)) == 0\n+ assert refine(arg(x), Q.negative(x)) == pi\n \n def test_func_args():\n class MyClass(Expr):\n", + "fail_to_pass": "[\"test_arg\"]", + "pass_to_pass": "[\"test_Abs\", \"test_pow1\", \"test_pow2\", \"test_exp\", \"test_Piecewise\", \"test_atan2\", \"test_re\", \"test_im\", \"test_complex\", \"test_sign\", \"test_func_args\", \"test_eval_refine\", \"test_refine_issue_12724\"]", + "expected_spans": { + "sympy/assumptions/refine.py": [ + "refine_im", + "impl" + ] + }, + "test_file_spans": { + "sympy/assumptions/tests/test_refine.py": [ + "imports", + "test_sign" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "impl" + ] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "impl" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "refine_matrixelement", + "impl" + ], + 
"sympy/functions/elementary/complexes.py": [ + "Abs.eval", + "Abs", + "arg.eval" + ] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "refine_matrixelement", + "impl" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "imports", + "impl" + ] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "imports", + "impl" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "refine_abs", + "impl" + ] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "refine_abs", + "impl" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "impl" + ] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "impl" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "imports", + "refine", + "refine_atan2" + ] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "imports", + "refine", + "refine_atan2" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "sympy/functions/elementary/complexes.py": [ + "arg.eval", + "arg" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "imports", + "impl" + ] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "imports", + "impl" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "refine_Pow", + "impl" + ], + "sympy/assumptions/tests/test_refine.py": [] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "refine_Pow", + "impl" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "refine_matrixelement", + "impl" + ] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "refine_matrixelement", + "impl" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "sympy/assumptions/refine.py": [ + "refine_Pow", + "impl" + ] + }, + "alternative_spans": { + "sympy/assumptions/refine.py": [ + "refine_Pow", + "impl" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240523_aider", + "spans": { + "sympy/assumptions/refine.py": [ + "impl" + ] + } + }, + { + "run_name": "20240702_codestory_aide_mixed", + "spans": { + "sympy/assumptions/refine.py": [ + "refine_matrixelement", + "impl" + ] + } + }, + { + "run_name": "20240808_RepoGraph_gpt4o", + "spans": { + "sympy/assumptions/refine.py": [ + "imports", + "impl" + ] + } + }, + { + "run_name": "20240908_infant_gpt4o", + "spans": { + "sympy/assumptions/refine.py": [ + "refine_abs", + "impl" + ] + } + }, + { + "run_name": "20240604_CodeR", + "spans": { + "sympy/assumptions/refine.py": [ + "impl" + ] + } + }, + { + "run_name": "20241028_agentless-1.5_gpt4o", + "spans": { + "sympy/assumptions/refine.py": [ + "imports", + "refine", + "refine_atan2" + ] + } + }, + { + "run_name": "20240912_marscode-agent-dev", + "spans": { + "sympy/assumptions/refine.py": [ + "imports", + "impl" + ] + } + }, + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "sympy/assumptions/refine.py": [ + "refine_Pow", + "impl" + ] + } + }, + { + "run_name": "20240620_sweagent_claude3.5sonnet", + "spans": { + "sympy/assumptions/refine.py": [ + "refine_matrixelement", + "impl" + ] + } + }, + { + 
"run_name": "20240721_amazon-q-developer-agent-20240719-dev", + "spans": { + "sympy/assumptions/refine.py": [ + "refine_Pow", + "impl" + ] + } + } + ] + }, + { + "instance_id": "django__django-14155", + "repo": "django/django", + "base_commit": "2f13c476abe4ba787b6cb71131818341911f43cc", + "problem_statement": "ResolverMatch.__repr__() doesn't handle functools.partial() nicely.\nDescription\n\t \n\t\t(last modified by Nick Pope)\n\t \nWhen a partial function is passed as the view, the __repr__ shows the func argument as functools.partial which isn't very helpful, especially as it doesn't reveal the underlying function or arguments provided.\nBecause a partial function also has arguments provided up front, we need to handle those specially so that they are accessible in __repr__.\nISTM that we can simply unwrap functools.partial objects in ResolverMatch.__init__().\n", + "golden_patch": "diff --git a/django/urls/resolvers.py b/django/urls/resolvers.py\n--- a/django/urls/resolvers.py\n+++ b/django/urls/resolvers.py\n@@ -59,9 +59,16 @@ def __getitem__(self, index):\n return (self.func, self.args, self.kwargs)[index]\n \n def __repr__(self):\n- return \"ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s, route=%s)\" % (\n- self._func_path, self.args, self.kwargs, self.url_name,\n- self.app_names, self.namespaces, self.route,\n+ if isinstance(self.func, functools.partial):\n+ func = repr(self.func)\n+ else:\n+ func = self._func_path\n+ return (\n+ 'ResolverMatch(func=%s, args=%r, kwargs=%r, url_name=%r, '\n+ 'app_names=%r, namespaces=%r, route=%r)' % (\n+ func, self.args, self.kwargs, self.url_name,\n+ self.app_names, self.namespaces, self.route,\n+ )\n )\n \n \n", + "test_patch": "diff --git a/tests/urlpatterns_reverse/tests.py b/tests/urlpatterns_reverse/tests.py\n--- a/tests/urlpatterns_reverse/tests.py\n+++ b/tests/urlpatterns_reverse/tests.py\n@@ -1141,10 +1141,30 @@ def test_repr(self):\n self.assertEqual(\n repr(resolve('/no_kwargs/42/37/')),\n \"ResolverMatch(func=urlpatterns_reverse.views.empty_view, \"\n- \"args=('42', '37'), kwargs={}, url_name=no-kwargs, app_names=[], \"\n- \"namespaces=[], route=^no_kwargs/([0-9]+)/([0-9]+)/$)\",\n+ \"args=('42', '37'), kwargs={}, url_name='no-kwargs', app_names=[], \"\n+ \"namespaces=[], route='^no_kwargs/([0-9]+)/([0-9]+)/$')\",\n )\n \n+ @override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')\n+ def test_repr_functools_partial(self):\n+ tests = [\n+ ('partial', 'template.html'),\n+ ('partial_nested', 'nested_partial.html'),\n+ ('partial_wrapped', 'template.html'),\n+ ]\n+ for name, template_name in tests:\n+ with self.subTest(name=name):\n+ func = (\n+ f\"functools.partial({views.empty_view!r}, \"\n+ f\"template_name='{template_name}')\"\n+ )\n+ self.assertEqual(\n+ repr(resolve(f'/{name}/')),\n+ f\"ResolverMatch(func={func}, args=(), kwargs={{}}, \"\n+ f\"url_name='{name}', app_names=[], namespaces=[], \"\n+ f\"route='{name}/')\",\n+ )\n+\n \n @override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')\n class ErroneousViewTests(SimpleTestCase):\n", + "fail_to_pass": "[\"test_repr (urlpatterns_reverse.tests.ResolverMatchTests)\", \"test_repr_functools_partial (urlpatterns_reverse.tests.ResolverMatchTests)\", \"test_resolver_match_on_request (urlpatterns_reverse.tests.ResolverMatchTests)\"]", + "pass_to_pass": "[\"test_include_2_tuple (urlpatterns_reverse.tests.IncludeTests)\", \"test_include_2_tuple_namespace (urlpatterns_reverse.tests.IncludeTests)\", \"test_include_3_tuple 
(urlpatterns_reverse.tests.IncludeTests)\", \"test_include_3_tuple_namespace (urlpatterns_reverse.tests.IncludeTests)\", \"test_include_4_tuple (urlpatterns_reverse.tests.IncludeTests)\", \"test_include_app_name (urlpatterns_reverse.tests.IncludeTests)\", \"test_include_app_name_namespace (urlpatterns_reverse.tests.IncludeTests)\", \"test_include_namespace (urlpatterns_reverse.tests.IncludeTests)\", \"test_include_urls (urlpatterns_reverse.tests.IncludeTests)\", \"URLResolver should raise an exception when no urlpatterns exist.\", \"test_invalid_regex (urlpatterns_reverse.tests.ErroneousViewTests)\", \"test_noncallable_view (urlpatterns_reverse.tests.ErroneousViewTests)\", \"test_attributeerror_not_hidden (urlpatterns_reverse.tests.ViewLoadingTests)\", \"test_module_does_not_exist (urlpatterns_reverse.tests.ViewLoadingTests)\", \"test_non_string_value (urlpatterns_reverse.tests.ViewLoadingTests)\", \"test_not_callable (urlpatterns_reverse.tests.ViewLoadingTests)\", \"test_parent_module_does_not_exist (urlpatterns_reverse.tests.ViewLoadingTests)\", \"test_string_without_dot (urlpatterns_reverse.tests.ViewLoadingTests)\", \"test_view_does_not_exist (urlpatterns_reverse.tests.ViewLoadingTests)\", \"test_view_loading (urlpatterns_reverse.tests.ViewLoadingTests)\", \"test_callable_handlers (urlpatterns_reverse.tests.ErrorHandlerResolutionTests)\", \"test_named_handlers (urlpatterns_reverse.tests.ErrorHandlerResolutionTests)\", \"test_invalid_resolve (urlpatterns_reverse.tests.LookaheadTests)\", \"test_invalid_reverse (urlpatterns_reverse.tests.LookaheadTests)\", \"test_valid_resolve (urlpatterns_reverse.tests.LookaheadTests)\", \"test_valid_reverse (urlpatterns_reverse.tests.LookaheadTests)\", \"test_no_illegal_imports (urlpatterns_reverse.tests.ReverseShortcutTests)\", \"test_redirect_to_object (urlpatterns_reverse.tests.ReverseShortcutTests)\", \"test_redirect_to_url (urlpatterns_reverse.tests.ReverseShortcutTests)\", \"test_redirect_to_view_name (urlpatterns_reverse.tests.ReverseShortcutTests)\", \"test_redirect_view_object (urlpatterns_reverse.tests.ReverseShortcutTests)\", \"test_reverse_by_path_nested (urlpatterns_reverse.tests.ReverseShortcutTests)\", \"test_resolver_match_on_request_before_resolution (urlpatterns_reverse.tests.ResolverMatchTests)\", \"test_urlpattern_resolve (urlpatterns_reverse.tests.ResolverMatchTests)\", \"test_illegal_args_message (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_illegal_kwargs_message (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_mixing_args_and_kwargs (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_no_args_message (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_non_urlsafe_prefix_with_args (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_patterns_reported (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_prefix_braces (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_prefix_format_char (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_prefix_parenthesis (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_reverse_none (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_script_name_escaping (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_urlpattern_reverse (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_view_not_found_message (urlpatterns_reverse.tests.URLPatternReverse)\", \"test_build_absolute_uri (urlpatterns_reverse.tests.ReverseLazyTest)\", \"test_inserting_reverse_lazy_into_string (urlpatterns_reverse.tests.ReverseLazyTest)\", \"test_redirect_with_lazy_reverse 
(urlpatterns_reverse.tests.ReverseLazyTest)\", \"test_user_permission_with_lazy_reverse (urlpatterns_reverse.tests.ReverseLazyTest)\", \"Names deployed via dynamic URL objects that require namespaces can't\", \"A default application namespace can be used for lookup.\", \"A default application namespace is sensitive to the current app.\", \"An application namespace without a default is sensitive to the current\", \"Namespaces can be applied to include()'d urlpatterns that set an\", \"Dynamic URL objects can return a (pattern, app_name) 2-tuple, and\", \"Namespace defaults to app_name when including a (pattern, app_name)\", \"current_app shouldn't be used unless it matches the whole path.\", \"Namespaces can be installed anywhere in the URL pattern tree.\", \"Namespaces can be embedded.\", \"Dynamic URL objects can be found using a namespace.\", \"Namespaces can be applied to include()'d urlpatterns.\", \"Using include() with namespaces when there is a regex variable in front\", \"Namespace prefixes can capture variables.\", \"A nested current_app should be split in individual namespaces (#24904).\", \"Namespaces can be nested.\", \"Nonexistent namespaces raise errors.\", \"Normal lookups work as expected.\", \"Normal lookups work on names included from other patterns.\", \"test_special_chars_namespace (urlpatterns_reverse.tests.NamespaceTests)\", \"The list of URLs that come back from a Resolver404 exception contains\", \"test_namespaced_view_detail (urlpatterns_reverse.tests.ResolverTests)\", \"A Resolver404 is raised if resolving doesn't meet the basic\", \"URLResolver._populate() can be called concurrently, but not more\", \"Test repr of URLResolver, especially when urlconf_name is a list\", \"test_resolver_reverse (urlpatterns_reverse.tests.ResolverTests)\", \"URL pattern name arguments don't need to be unique. 
The last registered\", \"Verifies lazy object returned by reverse_lazy is coerced to\", \"test_view_detail_as_method (urlpatterns_reverse.tests.ResolverTests)\", \"Test reversing an URL from the *overridden* URLconf from inside\", \"Test reversing an URL from the *default* URLconf from inside\", \"test_urlconf (urlpatterns_reverse.tests.RequestURLconfTests)\", \"The URLconf is reset after each request.\", \"test_urlconf_overridden (urlpatterns_reverse.tests.RequestURLconfTests)\", \"Overriding request.urlconf with None will fall back to the default\", \"test_no_handler_exception (urlpatterns_reverse.tests.NoRootUrlConfTests)\", \"If the urls.py doesn't specify handlers, the defaults are used\", \"test_lazy_in_settings (urlpatterns_reverse.tests.ReverseLazySettingsTest)\"]", + "expected_spans": { + "django/urls/resolvers.py": [ + "ResolverMatch.__repr__" + ] + }, + "test_file_spans": { + "tests/urlpatterns_reverse/tests.py": [ + "ResolverMatchTests.test_repr" + ] + }, + "resolved_by": [ + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/urls/resolvers.py": [ + "ResolverMatch.__init__", + "ResolverMatch.__repr__" + ], + "tests/resolve_url/tests.py": [] + }, + "alternative_spans": { + "django/urls/resolvers.py": [ + "ResolverMatch.__init__", + "ResolverMatch.__repr__" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "sympy__sympy-21171", + "repo": "sympy/sympy", + "base_commit": "aa22709cb7df2d7503803d4b2c0baa7aa21440b6", + "problem_statement": "_print_SingularityFunction() got an unexpected keyword argument 'exp'\nOn a Jupyter Notebook cell, type the following:\r\n\r\n```python\r\nfrom sympy import *\r\nfrom sympy.physics.continuum_mechanics import Beam\r\n# Young's modulus\r\nE = symbols(\"E\")\r\n# length of the beam\r\nL = symbols(\"L\")\r\n# concentrated load at the end tip of the beam\r\nF = symbols(\"F\")\r\n# square cross section\r\nB, H = symbols(\"B, H\")\r\nI = B * H**3 / 12\r\n# numerical values (material: steel)\r\nd = {B: 1e-02, H: 1e-02, E: 210e09, L: 0.2, F: 100}\r\n\r\nb2 = Beam(L, E, I)\r\nb2.apply_load(-F, L / 2, -1)\r\nb2.apply_support(0, \"fixed\")\r\nR0, M0 = symbols(\"R_0, M_0\")\r\nb2.solve_for_reaction_loads(R0, M0)\r\n```\r\n\r\nThen:\r\n\r\n```\r\nb2.shear_force()\r\n```\r\n\r\nThe following error appears:\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n/usr/local/lib/python3.8/dist-packages/IPython/core/formatters.py in __call__(self, obj)\r\n 343 method = get_real_method(obj, self.print_method)\r\n 344 if method is not None:\r\n--> 345 return method()\r\n 346 return None\r\n 347 else:\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/interactive/printing.py in _print_latex_png(o)\r\n 184 \"\"\"\r\n 185 if _can_print(o):\r\n--> 186 s = latex(o, mode=latex_mode, **settings)\r\n 187 if latex_mode == 'plain':\r\n 188 s = '$\\\\displaystyle %s$' % s\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/printer.py in __call__(self, *args, **kwargs)\r\n 371 \r\n 372 def __call__(self, *args, **kwargs):\r\n--> 373 return self.__wrapped__(*args, **kwargs)\r\n 374 \r\n 375 @property\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in latex(expr, **settings)\r\n 2913 \r\n 2914 \"\"\"\r\n-> 2915 return LatexPrinter(settings).doprint(expr)\r\n 2916 \r\n 2917 \r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in doprint(self, expr)\r\n 252 \r\n 253 def doprint(self, expr):\r\n--> 254 tex = 
Printer.doprint(self, expr)\r\n 255 \r\n 256 if self._settings['mode'] == 'plain':\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/printer.py in doprint(self, expr)\r\n 289 def doprint(self, expr):\r\n 290 \"\"\"Returns printer's representation for expr (as a string)\"\"\"\r\n--> 291 return self._str(self._print(expr))\r\n 292 \r\n 293 def _print(self, expr, **kwargs):\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/printer.py in _print(self, expr, **kwargs)\r\n 327 printmethod = '_print_' + cls.__name__\r\n 328 if hasattr(self, printmethod):\r\n--> 329 return getattr(self, printmethod)(expr, **kwargs)\r\n 330 # Unknown object, fall back to the emptyPrinter.\r\n 331 return self.emptyPrinter(expr)\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in _print_Add(self, expr, order)\r\n 381 else:\r\n 382 tex += \" + \"\r\n--> 383 term_tex = self._print(term)\r\n 384 if self._needs_add_brackets(term):\r\n 385 term_tex = r\"\\left(%s\\right)\" % term_tex\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/printer.py in _print(self, expr, **kwargs)\r\n 327 printmethod = '_print_' + cls.__name__\r\n 328 if hasattr(self, printmethod):\r\n--> 329 return getattr(self, printmethod)(expr, **kwargs)\r\n 330 # Unknown object, fall back to the emptyPrinter.\r\n 331 return self.emptyPrinter(expr)\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in _print_Mul(self, expr)\r\n 565 # use the original expression here, since fraction() may have\r\n 566 # altered it when producing numer and denom\r\n--> 567 tex += convert(expr)\r\n 568 \r\n 569 else:\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in convert(expr)\r\n 517 isinstance(x.base, Quantity)))\r\n 518 \r\n--> 519 return convert_args(args)\r\n 520 \r\n 521 def convert_args(args):\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in convert_args(args)\r\n 523 \r\n 524 for i, term in enumerate(args):\r\n--> 525 term_tex = self._print(term)\r\n 526 \r\n 527 if self._needs_mul_brackets(term, first=(i == 0),\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/printer.py in _print(self, expr, **kwargs)\r\n 327 printmethod = '_print_' + cls.__name__\r\n 328 if hasattr(self, printmethod):\r\n--> 329 return getattr(self, printmethod)(expr, **kwargs)\r\n 330 # Unknown object, fall back to the emptyPrinter.\r\n 331 return self.emptyPrinter(expr)\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in _print_Add(self, expr, order)\r\n 381 else:\r\n 382 tex += \" + \"\r\n--> 383 term_tex = self._print(term)\r\n 384 if self._needs_add_brackets(term):\r\n 385 term_tex = r\"\\left(%s\\right)\" % term_tex\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/printer.py in _print(self, expr, **kwargs)\r\n 327 printmethod = '_print_' + cls.__name__\r\n 328 if hasattr(self, printmethod):\r\n--> 329 return getattr(self, printmethod)(expr, **kwargs)\r\n 330 # Unknown object, fall back to the emptyPrinter.\r\n 331 return self.emptyPrinter(expr)\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in _print_Mul(self, expr)\r\n 569 else:\r\n 570 snumer = convert(numer)\r\n--> 571 sdenom = convert(denom)\r\n 572 ldenom = len(sdenom.split())\r\n 573 ratio = self._settings['long_frac_ratio']\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in convert(expr)\r\n 505 def convert(expr):\r\n 506 if not expr.is_Mul:\r\n--> 507 return str(self._print(expr))\r\n 508 else:\r\n 509 if self.order not in 
('old', 'none'):\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/printer.py in _print(self, expr, **kwargs)\r\n 327 printmethod = '_print_' + cls.__name__\r\n 328 if hasattr(self, printmethod):\r\n--> 329 return getattr(self, printmethod)(expr, **kwargs)\r\n 330 # Unknown object, fall back to the emptyPrinter.\r\n 331 return self.emptyPrinter(expr)\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in _print_Add(self, expr, order)\r\n 381 else:\r\n 382 tex += \" + \"\r\n--> 383 term_tex = self._print(term)\r\n 384 if self._needs_add_brackets(term):\r\n 385 term_tex = r\"\\left(%s\\right)\" % term_tex\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/printer.py in _print(self, expr, **kwargs)\r\n 327 printmethod = '_print_' + cls.__name__\r\n 328 if hasattr(self, printmethod):\r\n--> 329 return getattr(self, printmethod)(expr, **kwargs)\r\n 330 # Unknown object, fall back to the emptyPrinter.\r\n 331 return self.emptyPrinter(expr)\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/latex.py in _print_Pow(self, expr)\r\n 649 else:\r\n 650 if expr.base.is_Function:\r\n--> 651 return self._print(expr.base, exp=self._print(expr.exp))\r\n 652 else:\r\n 653 tex = r\"%s^{%s}\"\r\n\r\n/usr/local/lib/python3.8/dist-packages/sympy/printing/printer.py in _print(self, expr, **kwargs)\r\n 327 printmethod = '_print_' + cls.__name__\r\n 328 if hasattr(self, printmethod):\r\n--> 329 return getattr(self, printmethod)(expr, **kwargs)\r\n 330 # Unknown object, fall back to the emptyPrinter.\r\n 331 return self.emptyPrinter(expr)\r\n\r\nTypeError: _print_SingularityFunction() got an unexpected keyword argument 'exp'\r\n```\n", + "golden_patch": "diff --git a/sympy/printing/latex.py b/sympy/printing/latex.py\n--- a/sympy/printing/latex.py\n+++ b/sympy/printing/latex.py\n@@ -1968,10 +1968,12 @@ def _print_DiracDelta(self, expr, exp=None):\n tex = r\"\\left(%s\\right)^{%s}\" % (tex, exp)\n return tex\n \n- def _print_SingularityFunction(self, expr):\n+ def _print_SingularityFunction(self, expr, exp=None):\n shift = self._print(expr.args[0] - expr.args[1])\n power = self._print(expr.args[2])\n tex = r\"{\\left\\langle %s \\right\\rangle}^{%s}\" % (shift, power)\n+ if exp is not None:\n+ tex = r\"{\\left({\\langle %s \\rangle}^{%s}\\right)}^{%s}\" % (shift, power, exp)\n return tex\n \n def _print_Heaviside(self, expr, exp=None):\n", + "test_patch": "diff --git a/sympy/printing/tests/test_latex.py b/sympy/printing/tests/test_latex.py\n--- a/sympy/printing/tests/test_latex.py\n+++ b/sympy/printing/tests/test_latex.py\n@@ -214,6 +214,19 @@ def test_latex_SingularityFunction():\n assert latex(SingularityFunction(x, 4, -1)) == \\\n r\"{\\left\\langle x - 4 \\right\\rangle}^{-1}\"\n \n+ assert latex(SingularityFunction(x, 4, 5)**3) == \\\n+ r\"{\\left({\\langle x - 4 \\rangle}^{5}\\right)}^{3}\"\n+ assert latex(SingularityFunction(x, -3, 4)**3) == \\\n+ r\"{\\left({\\langle x + 3 \\rangle}^{4}\\right)}^{3}\"\n+ assert latex(SingularityFunction(x, 0, 4)**3) == \\\n+ r\"{\\left({\\langle x \\rangle}^{4}\\right)}^{3}\"\n+ assert latex(SingularityFunction(x, a, n)**3) == \\\n+ r\"{\\left({\\langle - a + x \\rangle}^{n}\\right)}^{3}\"\n+ assert latex(SingularityFunction(x, 4, -2)**3) == \\\n+ r\"{\\left({\\langle x - 4 \\rangle}^{-2}\\right)}^{3}\"\n+ assert latex((SingularityFunction(x, 4, -1)**3)**3) == \\\n+ r\"{\\left({\\langle x - 4 \\rangle}^{-1}\\right)}^{9}\"\n+\n \n def test_latex_cycle():\n assert latex(Cycle(1, 2, 4)) == r\"\\left( 1\\; 2\\; 4\\right)\"\n", + 
"fail_to_pass": "[\"test_latex_SingularityFunction\"]", + "pass_to_pass": "[\"test_printmethod\", \"test_latex_basic\", \"test_latex_builtins\", \"test_latex_cycle\", \"test_latex_permutation\", \"test_latex_Float\", \"test_latex_vector_expressions\", \"test_latex_symbols\", \"test_latex_functions\", \"test_function_subclass_different_name\", \"test_hyper_printing\", \"test_latex_bessel\", \"test_latex_fresnel\", \"test_latex_brackets\", \"test_latex_indexed\", \"test_latex_derivatives\", \"test_latex_subs\", \"test_latex_integrals\", \"test_latex_sets\", \"test_latex_SetExpr\", \"test_latex_Range\", \"test_latex_sequences\", \"test_latex_FourierSeries\", \"test_latex_FormalPowerSeries\", \"test_latex_intervals\", \"test_latex_AccumuBounds\", \"test_latex_emptyset\", \"test_latex_universalset\", \"test_latex_commutator\", \"test_latex_union\", \"test_latex_intersection\", \"test_latex_symmetric_difference\", \"test_latex_Complement\", \"test_latex_productset\", \"test_set_operators_parenthesis\", \"test_latex_Complexes\", \"test_latex_Naturals\", \"test_latex_Naturals0\", \"test_latex_Integers\", \"test_latex_ImageSet\", \"test_latex_ConditionSet\", \"test_latex_ComplexRegion\", \"test_latex_Contains\", \"test_latex_sum\", \"test_latex_product\", \"test_latex_limits\", \"test_latex_log\", \"test_issue_3568\", \"test_latex\", \"test_latex_dict\", \"test_latex_list\", \"test_latex_rational\", \"test_latex_inverse\", \"test_latex_DiracDelta\", \"test_latex_Heaviside\", \"test_latex_KroneckerDelta\", \"test_latex_LeviCivita\", \"test_mode\", \"test_latex_mathieu\", \"test_latex_Piecewise\", \"test_latex_Matrix\", \"test_latex_matrix_with_functions\", \"test_latex_NDimArray\", \"test_latex_mul_symbol\", \"test_latex_issue_4381\", \"test_latex_issue_4576\", \"test_latex_pow_fraction\", \"test_noncommutative\", \"test_latex_order\", \"test_latex_Lambda\", \"test_latex_PolyElement\", \"test_latex_FracElement\", \"test_latex_Poly\", \"test_latex_Poly_order\", \"test_latex_ComplexRootOf\", \"test_latex_RootSum\", \"test_settings\", \"test_latex_numbers\", \"test_latex_euler\", \"test_lamda\", \"test_custom_symbol_names\", \"test_matAdd\", \"test_matMul\", \"test_latex_MatrixSlice\", \"test_latex_RandomDomain\", \"test_PrettyPoly\", \"test_integral_transforms\", \"test_PolynomialRingBase\", \"test_categories\", \"test_Modules\", \"test_QuotientRing\", \"test_Tr\", \"test_Adjoint\", \"test_Transpose\", \"test_Hadamard\", \"test_ElementwiseApplyFunction\", \"test_ZeroMatrix\", \"test_OneMatrix\", \"test_Identity\", \"test_boolean_args_order\", \"test_imaginary\", \"test_builtins_without_args\", \"test_latex_greek_functions\", \"test_translate\", \"test_other_symbols\", \"test_modifiers\", \"test_greek_symbols\", \"test_fancyset_symbols\", \"test_builtin_no_args\", \"test_issue_6853\", \"test_Mul\", \"test_Pow\", \"test_issue_7180\", \"test_issue_8409\", \"test_issue_8470\", \"test_issue_15439\", \"test_issue_2934\", \"test_issue_10489\", \"test_issue_12886\", \"test_issue_13559\", \"test_issue_13651\", \"test_latex_UnevaluatedExpr\", \"test_MatrixElement_printing\", \"test_MatrixSymbol_printing\", \"test_KroneckerProduct_printing\", \"test_Series_printing\", \"test_TransferFunction_printing\", \"test_Parallel_printing\", \"test_Feedback_printing\", \"test_Quaternion_latex_printing\", \"test_TensorProduct_printing\", \"test_WedgeProduct_printing\", \"test_issue_9216\", \"test_latex_printer_tensor\", \"test_multiline_latex\", \"test_issue_15353\", \"test_trace\", \"test_print_basic\", 
\"test_MatrixSymbol_bold\", \"test_AppliedPermutation\", \"test_PermutationMatrix\", \"test_imaginary_unit\", \"test_text_re_im\", \"test_latex_diffgeom\", \"test_unit_printing\", \"test_issue_17092\", \"test_latex_decimal_separator\", \"test_Str\", \"test_latex_escape\", \"test_emptyPrinter\", \"test_global_settings\", \"test_pickleable\"]", + "expected_spans": { + "sympy/printing/latex.py": [ + "LatexPrinter._print_SingularityFunction" + ] + }, + "test_file_spans": { + "sympy/printing/tests/test_latex.py": [ + "test_latex_SingularityFunction" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "pydata__xarray-5131", + "repo": "pydata/xarray", + "base_commit": "e56905889c836c736152b11a7e6117a229715975", + "problem_statement": "Trailing whitespace in DatasetGroupBy text representation\nWhen displaying a DatasetGroupBy in an interactive Python session, the first line of output contains a trailing whitespace. The first example in the documentation demonstrate this:\r\n\r\n```pycon\r\n>>> import xarray as xr, numpy as np\r\n>>> ds = xr.Dataset(\r\n... {\"foo\": ((\"x\", \"y\"), np.random.rand(4, 3))},\r\n... coords={\"x\": [10, 20, 30, 40], \"letters\": (\"x\", list(\"abba\"))},\r\n... )\r\n>>> ds.groupby(\"letters\")\r\nDatasetGroupBy, grouped over 'letters' \r\n2 groups with labels 'a', 'b'.\r\n```\r\n\r\nThere is a trailing whitespace in the first line of output which is \"DatasetGroupBy, grouped over 'letters' \". This can be seen more clearly by converting the object to a string (note the whitespace before `\\n`):\r\n\r\n```pycon\r\n>>> str(ds.groupby(\"letters\"))\r\n\"DatasetGroupBy, grouped over 'letters' \\n2 groups with labels 'a', 'b'.\"\r\n```\r\n\r\n\r\nWhile this isn't a problem in itself, it causes an issue for us because we use flake8 in continuous integration to verify that our code is correctly formatted and we also have doctests that rely on DatasetGroupBy textual representation. Flake8 reports a violation on the trailing whitespaces in our docstrings. If we remove the trailing whitespaces, our doctests fail because the expected output doesn't match the actual output. So we have conflicting constraints coming from our tools which both seem reasonable. Trailing whitespaces are forbidden by flake8 because, among other reasons, they lead to noisy git diffs. Doctest want the expected output to be exactly the same as the actual output and considers a trailing whitespace to be a significant difference. We could configure flake8 to ignore this particular violation for the files in which we have these doctests, but this may cause other trailing whitespaces to creep in our code, which we don't want. Unfortunately it's not possible to just add `# NoQA` comments to get flake8 to ignore the violation only for specific lines because that creates a difference between expected and actual output from doctest point of view. Flake8 doesn't allow to disable checks for blocks of code either.\r\n\r\nIs there a reason for having this trailing whitespace in DatasetGroupBy representation? Whould it be OK to remove it? 
If so please let me know and I can make a pull request.\n", + "golden_patch": "diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py\n--- a/xarray/core/groupby.py\n+++ b/xarray/core/groupby.py\n@@ -436,7 +436,7 @@ def __iter__(self):\n return zip(self._unique_coord.values, self._iter_grouped())\n \n def __repr__(self):\n- return \"{}, grouped over {!r} \\n{!r} groups with labels {}.\".format(\n+ return \"{}, grouped over {!r}\\n{!r} groups with labels {}.\".format(\n self.__class__.__name__,\n self._unique_coord.name,\n self._unique_coord.size,\n", + "test_patch": "diff --git a/xarray/tests/test_groupby.py b/xarray/tests/test_groupby.py\n--- a/xarray/tests/test_groupby.py\n+++ b/xarray/tests/test_groupby.py\n@@ -388,7 +388,7 @@ def test_da_groupby_assign_coords():\n def test_groupby_repr(obj, dim):\n actual = repr(obj.groupby(dim))\n expected = \"%sGroupBy\" % obj.__class__.__name__\n- expected += \", grouped over %r \" % dim\n+ expected += \", grouped over %r\" % dim\n expected += \"\\n%r groups with labels \" % (len(np.unique(obj[dim])))\n if dim == \"x\":\n expected += \"1, 2, 3, 4, 5.\"\n@@ -405,7 +405,7 @@ def test_groupby_repr(obj, dim):\n def test_groupby_repr_datetime(obj):\n actual = repr(obj.groupby(\"t.month\"))\n expected = \"%sGroupBy\" % obj.__class__.__name__\n- expected += \", grouped over 'month' \"\n+ expected += \", grouped over 'month'\"\n expected += \"\\n%r groups with labels \" % (len(np.unique(obj.t.dt.month)))\n expected += \"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12.\"\n assert actual == expected\n", + "fail_to_pass": "[\"xarray/tests/test_groupby.py::test_groupby_repr[obj0-x]\", \"xarray/tests/test_groupby.py::test_groupby_repr[obj0-y]\", \"xarray/tests/test_groupby.py::test_groupby_repr[obj0-z]\", \"xarray/tests/test_groupby.py::test_groupby_repr[obj0-month]\", \"xarray/tests/test_groupby.py::test_groupby_repr[obj1-x]\", \"xarray/tests/test_groupby.py::test_groupby_repr[obj1-y]\", \"xarray/tests/test_groupby.py::test_groupby_repr[obj1-z]\", \"xarray/tests/test_groupby.py::test_groupby_repr[obj1-month]\", \"xarray/tests/test_groupby.py::test_groupby_repr_datetime[obj0]\", \"xarray/tests/test_groupby.py::test_groupby_repr_datetime[obj1]\"]", + "pass_to_pass": "[\"xarray/tests/test_groupby.py::test_consolidate_slices\", \"xarray/tests/test_groupby.py::test_groupby_dims_property\", \"xarray/tests/test_groupby.py::test_multi_index_groupby_map\", \"xarray/tests/test_groupby.py::test_multi_index_groupby_sum\", \"xarray/tests/test_groupby.py::test_groupby_da_datetime\", \"xarray/tests/test_groupby.py::test_groupby_duplicate_coordinate_labels\", \"xarray/tests/test_groupby.py::test_groupby_input_mutation\", \"xarray/tests/test_groupby.py::test_groupby_map_shrink_groups[obj0]\", \"xarray/tests/test_groupby.py::test_groupby_map_shrink_groups[obj1]\", \"xarray/tests/test_groupby.py::test_groupby_map_change_group_size[obj0]\", \"xarray/tests/test_groupby.py::test_groupby_map_change_group_size[obj1]\", \"xarray/tests/test_groupby.py::test_da_groupby_map_func_args\", \"xarray/tests/test_groupby.py::test_ds_groupby_map_func_args\", \"xarray/tests/test_groupby.py::test_da_groupby_empty\", \"xarray/tests/test_groupby.py::test_da_groupby_quantile\", \"xarray/tests/test_groupby.py::test_ds_groupby_quantile\", \"xarray/tests/test_groupby.py::test_da_groupby_assign_coords\", \"xarray/tests/test_groupby.py::test_groupby_drops_nans\", \"xarray/tests/test_groupby.py::test_groupby_grouping_errors\", \"xarray/tests/test_groupby.py::test_groupby_reduce_dimension_error\", 
\"xarray/tests/test_groupby.py::test_groupby_multiple_string_args\", \"xarray/tests/test_groupby.py::test_groupby_bins_timeseries\", \"xarray/tests/test_groupby.py::test_groupby_none_group_name\", \"xarray/tests/test_groupby.py::test_groupby_getitem\"]", + "expected_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "test_file_spans": { + "xarray/tests/test_groupby.py": [ + "test_groupby_repr", + "test_groupby_repr_datetime" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ], + "xarray/tests/test_groupby.py": [ + "test_groupby_repr", + "test_groupby_repr_datetime" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "xarray/core/groupby.py": [ + "imports", + "check_reduce_dims", + "unique_value_groups", + "_dummy_copy", + "_consolidate_slices", + "_inverse_permutation_indices", + "_DummyGroup", + "_DummyGroup.dims", + "_DummyGroup.shape", + "_ensure_1d", + "_apply_loffset", + "GroupBy", + "GroupBy.__init__", + "GroupBy.dims", + "GroupBy.groups", + "GroupBy.__repr__", + "GroupBy._get_index_and_items", + "GroupBy._infer_concat_args", + "GroupBy._binary_op", + "GroupBy._yield_binary_applied", + "GroupBy._maybe_unstack", + "GroupBy.quantile", + "GroupBy._first_or_last", + "GroupBy.assign_coords", + "_maybe_reorder", + "DataArrayGroupBy._concat_shortcut", + "DataArrayGroupBy._restore_dim_order", + "DataArrayGroupBy.map", + "DataArrayGroupBy.apply", + "DataArrayGroupBy._combine", + "DataArrayGroupBy.reduce", + "DatasetGroupBy.map", + "DatasetGroupBy.apply", + "DatasetGroupBy._combine", + "DatasetGroupBy.reduce" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "imports", + 
"check_reduce_dims", + "unique_value_groups", + "_dummy_copy", + "_consolidate_slices", + "_inverse_permutation_indices", + "_DummyGroup", + "_DummyGroup.dims", + "_DummyGroup.shape", + "_ensure_1d", + "_apply_loffset", + "GroupBy", + "GroupBy.__init__", + "GroupBy.dims", + "GroupBy.groups", + "GroupBy.__repr__", + "GroupBy._get_index_and_items", + "GroupBy._infer_concat_args", + "GroupBy._binary_op", + "GroupBy._yield_binary_applied", + "GroupBy._maybe_unstack", + "GroupBy.quantile", + "GroupBy._first_or_last", + "GroupBy.assign_coords", + "_maybe_reorder", + "DataArrayGroupBy._concat_shortcut", + "DataArrayGroupBy._restore_dim_order", + "DataArrayGroupBy.map", + "DataArrayGroupBy.apply", + "DataArrayGroupBy._combine", + "DataArrayGroupBy.reduce", + "DatasetGroupBy.map", + "DatasetGroupBy.apply", + "DatasetGroupBy._combine", + "DatasetGroupBy.reduce" + ] + } + }, + { + "name": "20240402_rag_gpt4", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + 
"alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240402_rag_claude3opus", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__iter__", + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__iter__", + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ], + "xarray/tests/test_groupby.py": [ + "test_groupby_repr_datetime" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + }, + "alternative_spans": { + "xarray/core/groupby.py": [ + "GroupBy.__repr__" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-14238", + "repo": "django/django", + "base_commit": "30e123ed351317b7527f632b3b7dc4e81e850449", + "problem_statement": "DEFAULT_AUTO_FIELD subclass check fails for subclasses of BigAutoField and SmallAutoField.\nDescription\n\t\nSet DEFAULT_AUTO_FIELD = \"example.core.models.MyBigAutoField\" , with contents of example.core.models:\nfrom django.db import models\nclass MyBigAutoField(models.BigAutoField):\n\tpass\nclass MyModel(models.Model):\n\tpass\nDjango then crashes with:\nTraceback (most recent call last):\n File \"/..././manage.py\", line 21, in \n\tmain()\n File \"/..././manage.py\", line 17, in main\n\texecute_from_command_line(sys.argv)\n File \"/.../venv/lib/python3.9/site-packages/django/core/management/__init__.py\", line 419, in execute_from_command_line\n\tutility.execute()\n File \"/.../venv/lib/python3.9/site-packages/django/core/management/__init__.py\", line 395, in execute\n\tdjango.setup()\n File \"/.../venv/lib/python3.9/site-packages/django/__init__.py\", line 24, in setup\n\tapps.populate(settings.INSTALLED_APPS)\n File \"/.../venv/lib/python3.9/site-packages/django/apps/registry.py\", line 114, in populate\n\tapp_config.import_models()\n File \"/.../venv/lib/python3.9/site-packages/django/apps/config.py\", line 301, in import_models\n\tself.models_module = import_module(models_module_name)\n File \"/Users/chainz/.pyenv/versions/3.9.1/lib/python3.9/importlib/__init__.py\", line 127, in import_module\n\treturn _bootstrap._gcd_import(name[level:], package, level)\n File \"\", line 1030, in _gcd_import\n File \"\", line 1007, in _find_and_load\n File \"\", line 986, in _find_and_load_unlocked\n File \"\", line 680, in _load_unlocked\n File \"\", line 790, in exec_module\n File \"\", line 228, in _call_with_frames_removed\n File \"/.../example/core/models.py\", line 8, in \n\tclass MyModel(models.Model):\n File \"/.../venv/lib/python3.9/site-packages/django/db/models/base.py\", line 320, in 
__new__\n\tnew_class._prepare()\n File \"/.../venv/lib/python3.9/site-packages/django/db/models/base.py\", line 333, in _prepare\n\topts._prepare(cls)\n File \"/.../venv/lib/python3.9/site-packages/django/db/models/options.py\", line 285, in _prepare\n\tpk_class = self._get_default_pk_class()\n File \"/.../venv/lib/python3.9/site-packages/django/db/models/options.py\", line 246, in _get_default_pk_class\n\traise ValueError(\nValueError: Primary key 'example.core.models.MyBigAutoField' referred by DEFAULT_AUTO_FIELD must subclass AutoField.\nThis can be fixed in AutoFieldMeta.__subclasscheck__ by allowing subclasses of those classes in the _subclasses property.\n", + "golden_patch": "diff --git a/django/db/models/fields/__init__.py b/django/db/models/fields/__init__.py\n--- a/django/db/models/fields/__init__.py\n+++ b/django/db/models/fields/__init__.py\n@@ -2524,7 +2524,7 @@ def __instancecheck__(self, instance):\n return isinstance(instance, self._subclasses) or super().__instancecheck__(instance)\n \n def __subclasscheck__(self, subclass):\n- return subclass in self._subclasses or super().__subclasscheck__(subclass)\n+ return issubclass(subclass, self._subclasses) or super().__subclasscheck__(subclass)\n \n \n class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta):\n", + "test_patch": "diff --git a/tests/model_fields/test_autofield.py b/tests/model_fields/test_autofield.py\n--- a/tests/model_fields/test_autofield.py\n+++ b/tests/model_fields/test_autofield.py\n@@ -30,6 +30,18 @@ def test_isinstance_of_autofield(self):\n self.assertIsInstance(field(), models.AutoField)\n \n def test_issubclass_of_autofield(self):\n- for field in (models.BigAutoField, models.SmallAutoField):\n+ class MyBigAutoField(models.BigAutoField):\n+ pass\n+\n+ class MySmallAutoField(models.SmallAutoField):\n+ pass\n+\n+ tests = [\n+ MyBigAutoField,\n+ MySmallAutoField,\n+ models.BigAutoField,\n+ models.SmallAutoField,\n+ ]\n+ for field in tests:\n with self.subTest(field.__name__):\n self.assertTrue(issubclass(field, models.AutoField))\ndiff --git a/tests/model_options/test_default_pk.py b/tests/model_options/test_default_pk.py\n--- a/tests/model_options/test_default_pk.py\n+++ b/tests/model_options/test_default_pk.py\n@@ -4,6 +4,10 @@\n from django.test.utils import isolate_apps\n \n \n+class MyBigAutoField(models.BigAutoField):\n+ pass\n+\n+\n @isolate_apps('model_options')\n class TestDefaultPK(SimpleTestCase):\n @override_settings(DEFAULT_AUTO_FIELD='django.db.models.NonexistentAutoField')\n@@ -74,6 +78,15 @@ class Model(models.Model):\n \n self.assertIsInstance(Model._meta.pk, models.SmallAutoField)\n \n+ @override_settings(\n+ DEFAULT_AUTO_FIELD='model_options.test_default_pk.MyBigAutoField'\n+ )\n+ def test_default_auto_field_setting_bigautofield_subclass(self):\n+ class Model(models.Model):\n+ pass\n+\n+ self.assertIsInstance(Model._meta.pk, MyBigAutoField)\n+\n @isolate_apps('model_options.apps.ModelPKConfig')\n @override_settings(DEFAULT_AUTO_FIELD='django.db.models.AutoField')\n def test_app_default_auto_field(self):\n", + "fail_to_pass": "[\"test_issubclass_of_autofield (model_fields.test_autofield.AutoFieldInheritanceTests)\", \"test_default_auto_field_setting_bigautofield_subclass (model_options.test_default_pk.TestDefaultPK)\"]", + "pass_to_pass": "[\"test_isinstance_of_autofield (model_fields.test_autofield.AutoFieldInheritanceTests)\", \"Backend specific ranges can be saved without corruption.\", \"Backend specific ranges are enforced at the model validation level\", \"test_coercing 
(model_fields.test_autofield.AutoFieldTests)\", \"Values within the documented safe range pass validation, and can be\", \"test_invalid_value (model_fields.test_autofield.AutoFieldTests)\", \"If there are stricter validators than the ones from the database\", \"test_rel_db_type (model_fields.test_autofield.AutoFieldTests)\", \"test_types (model_fields.test_autofield.AutoFieldTests)\", \"test_app_default_auto_field (model_options.test_default_pk.TestDefaultPK)\", \"test_app_default_auto_field_non_auto (model_options.test_default_pk.TestDefaultPK)\", \"test_app_default_auto_field_none (model_options.test_default_pk.TestDefaultPK)\", \"test_app_default_auto_field_nonexistent (model_options.test_default_pk.TestDefaultPK)\", \"test_default_auto_field_setting (model_options.test_default_pk.TestDefaultPK)\", \"test_default_auto_field_setting_non_auto (model_options.test_default_pk.TestDefaultPK)\", \"test_default_auto_field_setting_none (model_options.test_default_pk.TestDefaultPK)\", \"test_default_auto_field_setting_nonexistent (model_options.test_default_pk.TestDefaultPK)\", \"test_m2m_app_default_auto_field (model_options.test_default_pk.TestDefaultPK)\", \"test_m2m_default_auto_field_setting (model_options.test_default_pk.TestDefaultPK)\", \"test_coercing (model_fields.test_integerfield.SmallIntegerFieldTests)\", \"test_invalid_value (model_fields.test_integerfield.SmallIntegerFieldTests)\", \"test_rel_db_type (model_fields.test_integerfield.SmallIntegerFieldTests)\", \"test_types (model_fields.test_integerfield.SmallIntegerFieldTests)\", \"test_coercing (model_fields.test_integerfield.IntegerFieldTests)\", \"test_invalid_value (model_fields.test_integerfield.IntegerFieldTests)\", \"test_rel_db_type (model_fields.test_integerfield.IntegerFieldTests)\", \"test_types (model_fields.test_integerfield.IntegerFieldTests)\", \"test_coercing (model_fields.test_integerfield.BigIntegerFieldTests)\", \"test_invalid_value (model_fields.test_integerfield.BigIntegerFieldTests)\", \"test_rel_db_type (model_fields.test_integerfield.BigIntegerFieldTests)\", \"test_types (model_fields.test_integerfield.BigIntegerFieldTests)\", \"test_coercing (model_fields.test_autofield.SmallAutoFieldTests)\", \"test_invalid_value (model_fields.test_autofield.SmallAutoFieldTests)\", \"test_rel_db_type (model_fields.test_autofield.SmallAutoFieldTests)\", \"test_types (model_fields.test_autofield.SmallAutoFieldTests)\", \"test_coercing (model_fields.test_autofield.BigAutoFieldTests)\", \"test_invalid_value (model_fields.test_autofield.BigAutoFieldTests)\", \"test_rel_db_type (model_fields.test_autofield.BigAutoFieldTests)\", \"test_types (model_fields.test_autofield.BigAutoFieldTests)\"]", + "expected_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "test_file_spans": { + "tests/model_fields/test_autofield.py": [ + "AutoFieldInheritanceTests.test_issubclass_of_autofield" + ], + "tests/model_options/test_default_pk.py": [ + "imports", + "TestDefaultPK.test_app_default_auto_field" + ] + }, + "resolved_by": [ + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + 
"AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta._subclasses", + "AutoFieldMeta.__instancecheck__", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta._subclasses", + "AutoFieldMeta.__instancecheck__", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + 
"AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta._subclasses", + "AutoFieldMeta.__instancecheck__", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta._subclasses", + "AutoFieldMeta.__instancecheck__", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240725_opendevin_codeact_v1.8_claude35sonnet", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ], + "tests/model_fields/test_autofield.py": [] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta._subclasses" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta._subclasses" + ] + } + }, + { + "name": "20240620_sweagent_claude3.5sonnet", + "updated_spans": { + 
"django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ], + "tests/model_fields/test_autofield.py": [ + "imports", + "AutoFieldTests", + "BigAutoFieldTests", + "SmallAutoFieldTests", + "AutoFieldInheritanceTests.test_isinstance_of_autofield" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + }, + "alternative_spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta", + "AutoFieldMeta.__subclasscheck__" + ] + } + } + ], + "alternative_spans": [ + { + "run_name": "20240627_abanteai_mentatbot_gpt4o", + "spans": { + "django/db/models/fields/__init__.py": [ + "AutoFieldMeta._subclasses" + ] + } + } + ] + }, + { + "instance_id": "sympy__sympy-21379", + "repo": "sympy/sympy", + "base_commit": "624217179aaf8d094e6ff75b7493ad1ee47599b0", + "problem_statement": "Unexpected `PolynomialError` when using simple `subs()` for particular expressions\nI am seeing weird behavior with `subs` for particular expressions with hyperbolic sinusoids with piecewise arguments. When applying `subs`, I obtain an unexpected `PolynomialError`. For context, I was umbrella-applying a casting from int to float of all int atoms for a bunch of random expressions before using a tensorflow lambdify to avoid potential tensorflow type errors. You can pretend the expression below has a `+ 1` at the end, but below is the MWE that I could produce.\r\n\r\nSee the expression below, and the conditions in which the exception arises.\r\n\r\nSympy version: 1.8.dev\r\n\r\n```python\r\nfrom sympy import *\r\nfrom sympy.core.cache import clear_cache\r\n\r\nx, y, z = symbols('x y z')\r\n\r\nclear_cache()\r\nexpr = exp(sinh(Piecewise((x, y > x), (y, True)) / z))\r\n# This works fine\r\nexpr.subs({1: 1.0})\r\n\r\nclear_cache()\r\nx, y, z = symbols('x y z', real=True)\r\nexpr = exp(sinh(Piecewise((x, y > x), (y, True)) / z))\r\n# This fails with \"PolynomialError: Piecewise generators do not make sense\"\r\nexpr.subs({1: 1.0}) # error\r\n# Now run it again (isympy...) w/o clearing cache and everything works as expected without error\r\nexpr.subs({1: 1.0})\r\n```\r\n\r\nI am not really sure where the issue is, but I think it has something to do with the order of assumptions in this specific type of expression. 
Here is what I found-\r\n\r\n- The error only (AFAIK) happens with `cosh` or `tanh` in place of `sinh`, otherwise it succeeds\r\n- The error goes away if removing the division by `z`\r\n- The error goes away if removing `exp` (but stays for most unary functions, `sin`, `log`, etc.)\r\n- The error only happens with real symbols for `x` and `y` (`z` does not have to be real)\r\n\r\nNot too sure how to debug this one.\n", + "golden_patch": "diff --git a/sympy/core/mod.py b/sympy/core/mod.py\n--- a/sympy/core/mod.py\n+++ b/sympy/core/mod.py\n@@ -40,6 +40,7 @@ def eval(cls, p, q):\n from sympy.core.mul import Mul\n from sympy.core.singleton import S\n from sympy.core.exprtools import gcd_terms\n+ from sympy.polys.polyerrors import PolynomialError\n from sympy.polys.polytools import gcd\n \n def doit(p, q):\n@@ -166,10 +167,13 @@ def doit(p, q):\n # XXX other possibilities?\n \n # extract gcd; any further simplification should be done by the user\n- G = gcd(p, q)\n- if G != 1:\n- p, q = [\n- gcd_terms(i/G, clear=False, fraction=False) for i in (p, q)]\n+ try:\n+ G = gcd(p, q)\n+ if G != 1:\n+ p, q = [gcd_terms(i/G, clear=False, fraction=False)\n+ for i in (p, q)]\n+ except PolynomialError: # issue 21373\n+ G = S.One\n pwas, qwas = p, q\n \n # simplify terms\n", + "test_patch": "diff --git a/sympy/core/tests/test_arit.py b/sympy/core/tests/test_arit.py\n--- a/sympy/core/tests/test_arit.py\n+++ b/sympy/core/tests/test_arit.py\n@@ -1913,6 +1913,16 @@ def test_Mod():\n assert Mod(x, y).rewrite(floor) == x - y*floor(x/y)\n assert ((x - Mod(x, y))/y).rewrite(floor) == floor(x/y)\n \n+ # issue 21373\n+ from sympy.functions.elementary.trigonometric import sinh\n+ from sympy.functions.elementary.piecewise import Piecewise\n+\n+ x_r, y_r = symbols('x_r y_r', real=True)\n+ (Piecewise((x_r, y_r > x_r), (y_r, True)) / z) % 1\n+ expr = exp(sinh(Piecewise((x_r, y_r > x_r), (y_r, True)) / z))\n+ expr.subs({1: 1.0})\n+ sinh(Piecewise((x_r, y_r > x_r), (y_r, True)) * z ** -1.0).is_zero\n+\n \n def test_Mod_Pow():\n # modular exponentiation\n", + "fail_to_pass": "[\"test_Mod\"]", + "pass_to_pass": "[\"test_bug1\", \"test_Symbol\", \"test_arit0\", \"test_div\", \"test_pow\", \"test_pow2\", \"test_pow3\", \"test_mod_pow\", \"test_pow_E\", \"test_pow_issue_3516\", \"test_pow_im\", \"test_real_mul\", \"test_ncmul\", \"test_mul_add_identity\", \"test_ncpow\", \"test_powerbug\", \"test_Mul_doesnt_expand_exp\", \"test_Mul_is_integer\", \"test_Add_Mul_is_integer\", \"test_Add_Mul_is_finite\", \"test_Mul_is_even_odd\", \"test_evenness_in_ternary_integer_product_with_even\", \"test_oddness_in_ternary_integer_product_with_even\", \"test_Mul_is_rational\", \"test_Add_is_rational\", \"test_Add_is_even_odd\", \"test_Mul_is_negative_positive\", \"test_Mul_is_negative_positive_2\", \"test_Mul_is_nonpositive_nonnegative\", \"test_Add_is_negative_positive\", \"test_Add_is_nonpositive_nonnegative\", \"test_Pow_is_integer\", \"test_Pow_is_real\", \"test_real_Pow\", \"test_Pow_is_finite\", \"test_Pow_is_even_odd\", \"test_Pow_is_negative_positive\", \"test_Pow_is_zero\", \"test_Pow_is_nonpositive_nonnegative\", \"test_Mul_is_imaginary_real\", \"test_Mul_hermitian_antihermitian\", \"test_Add_is_comparable\", \"test_Mul_is_comparable\", \"test_Pow_is_comparable\", \"test_Add_is_positive_2\", \"test_Add_is_irrational\", \"test_Mul_is_irrational\", \"test_issue_3531\", \"test_issue_3531b\", \"test_bug3\", \"test_suppressed_evaluation\", \"test_AssocOp_doit\", \"test_Add_Mul_Expr_args\", \"test_Add_as_coeff_mul\", 
\"test_Pow_as_coeff_mul_doesnt_expand\", \"test_issue_3514_18626\", \"test_make_args\", \"test_issue_5126\", \"test_Rational_as_content_primitive\", \"test_Add_as_content_primitive\", \"test_Mul_as_content_primitive\", \"test_Pow_as_content_primitive\", \"test_issue_5460\", \"test_product_irrational\", \"test_issue_5919\", \"test_Mod_Pow\", \"test_Mod_is_integer\", \"test_Mod_is_nonposneg\", \"test_issue_6001\", \"test_polar\", \"test_issue_6040\", \"test_issue_6082\", \"test_issue_6077\", \"test_mul_flatten_oo\", \"test_add_flatten\", \"test_issue_5160_6087_6089_6090\", \"test_float_int_round\", \"test_issue_6611a\", \"test_denest_add_mul\", \"test_mul_coeff\", \"test_mul_zero_detection\", \"test_Mul_with_zero_infinite\", \"test_Mul_does_not_cancel_infinities\", \"test_Mul_does_not_distribute_infinity\", \"test_issue_8247_8354\", \"test_Add_is_zero\", \"test_issue_14392\", \"test_divmod\", \"test__neg__\", \"test_issue_18507\", \"test_issue_17130\"]", + "expected_spans": { + "sympy/core/mod.py": [ + "Mod.eval" + ] + }, + "test_file_spans": { + "sympy/core/tests/test_arit.py": [ + "test_Mod" + ] + }, + "resolved_by": [ + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "sympy/polys/polytools.py": [ + "_parallel_poly_from_expr" + ] + }, + "alternative_spans": {} + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "sympy/polys/polytools.py": [ + "_parallel_poly_from_expr" + ] + }, + "alternative_spans": {} + } + ], + "alternative_spans": [] + }, + { + "instance_id": "django__django-14382", + "repo": "django/django", + "base_commit": "29345aecf6e8d53ccb3577a3762bb0c263f7558d", + "problem_statement": "django-admin startapp with trailing slash in directory name results in error\nDescription\n\t\nBash tab-completion appends trailing slashes to directory names. django-admin startapp name directory/ results in the error:\nCommandError: '' is not a valid app directory. 
Please make sure the directory is a valid identifier.\nThe error is caused by \u200bline 77 of django/core/management/templates.py by calling basename() on the path with no consideration for a trailing slash:\nself.validate_name(os.path.basename(target), 'directory')\nRemoving potential trailing slashes would solve the problem:\nself.validate_name(os.path.basename(target.rstrip(os.sep)), 'directory')\n", + "golden_patch": "diff --git a/django/core/management/templates.py b/django/core/management/templates.py\n--- a/django/core/management/templates.py\n+++ b/django/core/management/templates.py\n@@ -73,9 +73,9 @@ def handle(self, app_or_project, name, target=None, **options):\n except OSError as e:\n raise CommandError(e)\n else:\n- if app_or_project == 'app':\n- self.validate_name(os.path.basename(target), 'directory')\n top_dir = os.path.abspath(os.path.expanduser(target))\n+ if app_or_project == 'app':\n+ self.validate_name(os.path.basename(top_dir), 'directory')\n if not os.path.exists(top_dir):\n raise CommandError(\"Destination directory '%s' does not \"\n \"exist, please create it first.\" % top_dir)\n", + "test_patch": "diff --git a/tests/admin_scripts/tests.py b/tests/admin_scripts/tests.py\n--- a/tests/admin_scripts/tests.py\n+++ b/tests/admin_scripts/tests.py\n@@ -2206,6 +2206,13 @@ def test_importable_target_name(self):\n \"another directory.\"\n )\n \n+ def test_trailing_slash_in_target_app_directory_name(self):\n+ app_dir = os.path.join(self.test_dir, 'apps', 'app1')\n+ os.makedirs(app_dir)\n+ _, err = self.run_django_admin(['startapp', 'app', os.path.join('apps', 'app1', '')])\n+ self.assertNoOutput(err)\n+ self.assertIs(os.path.exists(os.path.join(app_dir, 'apps.py')), True)\n+\n def test_overlaying_app(self):\n # Use a subdirectory so it is outside the PYTHONPATH.\n os.makedirs(os.path.join(self.test_dir, 'apps/app1'))\n", + "fail_to_pass": "[\"test_trailing_slash_in_target_app_directory_name (admin_scripts.tests.StartApp)\"]", + "pass_to_pass": "[\"Program name is computed from the execute_from_command_line()'s argv\", \"test_params_to_runserver (admin_scripts.tests.ManageTestserver)\", \"test_testserver_handle_params (admin_scripts.tests.ManageTestserver)\", \"test_migration_warning_multiple_apps (admin_scripts.tests.ManageRunserverMigrationWarning)\", \"test_migration_warning_one_app (admin_scripts.tests.ManageRunserverMigrationWarning)\", \"Ensure runserver.check_migrations doesn't choke on empty DATABASES.\", \"runserver.check_migrations() doesn't choke when a database is read-only.\", \"test_runner_addrport_ipv6 (admin_scripts.tests.ManageRunserver)\", \"test_runner_ambiguous (admin_scripts.tests.ManageRunserver)\", \"test_runner_custom_defaults (admin_scripts.tests.ManageRunserver)\", \"test_runner_custom_defaults_ipv6 (admin_scripts.tests.ManageRunserver)\", \"test_runner_hostname (admin_scripts.tests.ManageRunserver)\", \"test_runner_hostname_ipv6 (admin_scripts.tests.ManageRunserver)\", \"test_runserver_addrport (admin_scripts.tests.ManageRunserver)\", \"test_skip_checks (admin_scripts.tests.ManageRunserver)\", \"Apps listed first in INSTALLED_APPS have precedence.\", \"test_program_name_in_help (admin_scripts.tests.MainModule)\", \"test_non_existent_command_output (admin_scripts.tests.ManageManuallyConfiguredSettings)\", \"test_empty_allowed_hosts_error (admin_scripts.tests.ManageRunserverEmptyAllowedHosts)\", \"Regression for #20509\", \"no settings: manage.py builtin commands fail with an error when no settings provided\", \"no settings: manage.py builtin 
commands fail if settings file (from environment) doesn't exist\", \"no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist\", \"manage.py builtin commands does not swallow attribute error due to bad\", \"Test listing available commands output note when only core commands are\", \"import error: manage.py builtin commands shows useful diagnostic info\", \"test_key_error (admin_scripts.tests.ManageSettingsWithSettingsErrors)\", \"no settings: django-admin builtin commands fail with an error when no settings provided\", \"no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"Commands that don't require settings succeed if the settings file\", \"test_no_suggestions (admin_scripts.tests.DjangoAdminSuggestions)\", \"test_suggestions (admin_scripts.tests.DjangoAdminSuggestions)\", \"Options passed before settings are correctly handled.\", \"Options are correctly handled when they are passed before and after\", \"Options passed after settings are correctly handled.\", \"Short options passed after settings are correctly handled.\", \"Short options passed before settings are correctly handled.\", \"alternate: django-admin builtin commands fail with an error when no settings provided\", \"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"alternate: django-admin builtin commands succeed if settings are provided in the environment\", \"alternate: django-admin builtin commands succeed if settings are provided as argument\", \"alternate: django-admin can't execute user commands unless settings are provided\", \"alternate: django-admin can execute user commands if settings are provided in environment\", \"alternate: django-admin can execute user commands if settings are provided as argument\", \"minimal: django-admin builtin commands fail with an error when no settings provided\", \"minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"minimal: django-admin builtin commands fail if settings are provided in the environment\", \"minimal: django-admin builtin commands fail if settings are provided as argument\", \"minimal: django-admin can't execute user commands unless settings are provided\", \"minimal: django-admin can't execute user commands, even if settings are provided in environment\", \"minimal: django-admin can't execute user commands, even if settings are provided as argument\", \"fulldefault: django-admin builtin commands fail with an error when no settings provided\", \"fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"fulldefault: django-admin builtin commands succeed if the environment contains settings\", \"fulldefault: django-admin builtin commands succeed if a settings file is provided\", \"fulldefault: django-admin can't execute user commands unless settings are provided\", \"fulldefault: django-admin can execute user commands if settings are provided in environment\", \"fulldefault: django-admin can execute user commands if settings are provided as argument\", \"default: 
django-admin builtin commands fail with an error when no settings provided\", \"default: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"default: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"default: django-admin builtin commands succeed if settings are provided in the environment\", \"default: django-admin builtin commands succeed if settings are provided as argument\", \"default: django-admin can't execute user commands if it isn't provided settings\", \"default: django-admin can execute user commands if settings are provided in environment\", \"default: django-admin can execute user commands if settings are provided as argument\", \"manage.py check does not raise errors when an app imports a base\", \"manage.py check reports an ImportError if an app's models.py\", \"manage.py check does not raise an ImportError validating a\", \"check reports an error on a nonexistent app in INSTALLED_APPS.\", \"All errors/warnings should be sorted by level and by message.\", \"When there are only warnings or less serious messages, then Django\", \"The all option also shows settings with the default value.\", \"Runs without error and emits settings diff.\", \"The --default option specifies an alternate settings module for\", \"test_dynamic_settings_configured (admin_scripts.tests.DiffSettings)\", \"test_settings_configured (admin_scripts.tests.DiffSettings)\", \"--output=unified emits settings diff in unified mode.\", \"--output=unified --all emits settings diff in unified mode and includes\", \"alternate: manage.py builtin commands fail with an error when no default settings provided\", \"alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist\", \"alternate: manage.py builtin commands work if settings are provided in the environment\", \"alternate: manage.py builtin commands work with settings provided as argument\", \"alternate: manage.py can't execute user commands without settings\", \"alternate: manage.py output syntax color can be deactivated with the `--no-color` option\", \"alternate: manage.py can execute user commands if settings are provided in environment\", \"alternate: manage.py can execute user commands if settings are provided as argument\", \"directory: django-admin builtin commands fail with an error when no settings provided\", \"directory: django-admin builtin commands fail if settings file (from environment) doesn't exist\", \"directory: django-admin builtin commands fail if settings file (from argument) doesn't exist\", \"directory: django-admin builtin commands succeed if settings are provided in the environment\", \"directory: django-admin builtin commands succeed if settings are provided as argument\", \"directory: django-admin can't execute user commands unless settings are provided\", \"directory: startapp creates the correct directory\", \"directory: startapp creates the correct directory with a custom template\", \"startapp creates the correct directory with Unicode characters.\", \"minimal: manage.py builtin commands fail with an error when no settings provided\", \"minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist\", \"minimal: manage.py builtin commands fail if settings are provided in the environment\", \"minimal: manage.py 
builtin commands fail if settings are provided as argument\", \"minimal: manage.py can't execute user commands without appropriate settings\", \"minimal: manage.py can't execute user commands, even if settings are provided in environment\", \"minimal: manage.py can't execute user commands, even if settings are provided as argument\", \"multiple: manage.py builtin commands fail with an error when no settings provided\", \"multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist\", \"multiple: manage.py can execute builtin commands if settings are provided in the environment\", \"multiple: manage.py builtin commands succeed if settings are provided as argument\", \"multiple: manage.py can't execute user commands using default settings\", \"multiple: manage.py can execute user commands if settings are provided in environment\", \"multiple: manage.py can execute user commands if settings are provided as argument\", \"fulldefault: manage.py builtin commands succeed when default settings are appropriate\", \"fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist\", \"fulldefault: manage.py builtin commands succeed if settings are provided in the environment\", \"fulldefault: manage.py builtin commands succeed if settings are provided as argument\", \"fulldefault: manage.py can execute user commands when default settings are appropriate\", \"fulldefault: manage.py can execute user commands when settings are provided in environment\", \"fulldefault: manage.py can execute user commands when settings are provided as argument\", \"default: manage.py builtin commands succeed when default settings are appropriate\", \"default: manage.py builtin commands fail if settings file (from environment) doesn't exist\", \"default: manage.py builtin commands succeed if settings file (from argument) doesn't exist\", \"default: manage.py builtin commands succeed if settings are provided in the environment\", \"default: manage.py builtin commands succeed if settings are provided as argument\", \"default: manage.py can execute user commands when default settings are appropriate\", \"default: manage.py can execute user commands when settings are provided in environment\", \"default: manage.py can execute user commands when settings are provided as argument\", \"startapp validates that app name doesn't clash with existing Python\", \"test_importable_target_name (admin_scripts.tests.StartApp)\", \"startapp validates that app name is a valid Python identifier.\", \"test_invalid_target_name (admin_scripts.tests.StartApp)\", \"test_overlaying_app (admin_scripts.tests.StartApp)\", \"test_template (admin_scripts.tests.StartApp)\", \"Make sure an exception is raised when the provided\", \"Make sure the startproject management command is able to use a different project template\", \"Make sure template context variables are rendered with proper values\", \"Make sure the startproject management command is able to use a different project template from a tarball\", \"The startproject management command is able to use a different project\", \"Startproject can use a project template from a tarball and create it in a specified location\", \"The startproject management command is able to render templates with\", \"Make sure the startproject management command is able to 
render custom files\", \"startproject validates that project name doesn't clash with existing\", \"Make sure the startproject management command validates a project name\", \"Make sure template context variables are not html escaped\", \"Startproject management command handles project template tar/zip balls from non-canonical urls\", \"Make sure the startproject management command creates a project\", \"Make sure the startproject management command creates a project in a specific directory\", \"Ticket 17475: Template dir passed has a trailing path separator\", \"Make sure passing the wrong kinds of arguments outputs an error and prints usage\", \"User AppCommands can execute when a single app name is provided\", \"User AppCommands raise an error when multiple app names are provided\", \"User AppCommands raise an error when no app name is provided\", \"User AppCommands can execute when some of the provided app names are invalid\", \"User BaseCommands can execute when a label is provided\", \"User BaseCommands can execute when no labels are provided\", \"User BaseCommands can execute with options when a label is provided\", \"User BaseCommands can execute with multiple options when a label is provided\", \"User BaseCommands outputs command usage when wrong option is specified\", \"Test run_from_argv properly terminates even with custom execute() (#19665)\", \"test_color_style (admin_scripts.tests.CommandTypes)\", \"test_command_color (admin_scripts.tests.CommandTypes)\", \"--no-color prevent colorization of the output\", \"test_custom_stderr (admin_scripts.tests.CommandTypes)\", \"test_custom_stdout (admin_scripts.tests.CommandTypes)\", \"test_force_color_command_init (admin_scripts.tests.CommandTypes)\", \"test_force_color_execute (admin_scripts.tests.CommandTypes)\", \"help is handled as a special case\", \"--help is equivalent to help\", \"help --commands shows the list of all available commands\", \"-h is handled as a short form of --help\", \"User LabelCommands can execute when a label is provided\", \"User LabelCommands are executed multiple times if multiple labels are provided\", \"User LabelCommands raise an error if no label is provided\", \"test_no_color_force_color_mutually_exclusive_command_init (admin_scripts.tests.CommandTypes)\", \"test_no_color_force_color_mutually_exclusive_execute (admin_scripts.tests.CommandTypes)\", \"NoArg Commands can be executed\", \"NoArg Commands raise an error if an argument is provided\", \"A command called from the command line should close connections after\", \"Non-ASCII message of CommandError does not raise any\", \"--help can be used on a specific command\", \"version is handled as a special case\", \"--version is equivalent to version\"]", + "expected_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "test_file_spans": { + "tests/admin_scripts/tests.py": [ + "StartApp.test_overlaying_app" + ] + }, + "resolved_by": [ + { + "name": "20240523_aider", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240524_opencsg_starship_gpt4", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240630_agentless_gpt4o", + "updated_spans": { + 
"django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240925_hyperagent_lite1", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240828_autose_mixed", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240615_appmap-navie_gpt4o", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240530_autocoderover-v20240408", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240612_MASAI_gpt4o", + "updated_spans": {}, + "alternative_spans": {} + }, + { + "name": "20240702_codestory_aide_mixed", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240706_sima_gpt4o", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240623_moatless_claude35sonnet", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240806_SuperCoder2.0", + "updated_spans": { + "django/core/management/templates.py": [ + "imports", + "TemplateCommand", + "TemplateCommand.add_arguments", + "TemplateCommand.handle", + "TemplateCommand.handle_template", + "TemplateCommand.validate_name", + "TemplateCommand.download", + "TemplateCommand.extract" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "imports", + "TemplateCommand", + "TemplateCommand.add_arguments", + "TemplateCommand.handle", + "TemplateCommand.handle_template", + "TemplateCommand.validate_name", + "TemplateCommand.download", + "TemplateCommand.extract" + ] + } + }, + { + "name": "20240820_honeycomb", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240808_RepoGraph_gpt4o", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240908_infant_gpt4o", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240829_Isoform", + "updated_spans": { + "django/core/management/templates.py": [ + 
"TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240604_CodeR", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240728_sweagent_gpt4o", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20241028_agentless-1.5_gpt4o", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240622_Lingma_Agent", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20241016_IBM-SWE-1.0", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240402_sweagent_claude3opus", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20241025_OpenHands-CodeAct-2.1-sonnet-20241022", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240612_IBM_Research_Agent101", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240621_autocoderover-v20240620", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240617_factory_code_droid", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240402_sweagent_gpt4", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240912_marscode-agent-dev", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240617_moatless_gpt4o", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240509_amazon-q-developer-agent-20240430-dev", + "updated_spans": { + "django/core/management/templates.py": [ + 
"TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240811_gru", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240627_abanteai_mentatbot_gpt4o", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + }, + { + "name": "20240721_amazon-q-developer-agent-20240719-dev", + "updated_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + }, + "alternative_spans": { + "django/core/management/templates.py": [ + "TemplateCommand.handle" + ] + } + } + ], + "alternative_spans": [] + }, + { + "instance_id": "pallets__flask-4045", + "repo": "pallets/flask", + "base_commit": "d8c37f43724cd9fb0870f77877b7c4c7e38a19e0", + "problem_statement": "Raise error when blueprint name contains a dot\nThis is required since every dot is now significant since blueprints can be nested. An error was already added for endpoint names in 1.0, but should have been added for this as well.\n", + "golden_patch": "diff --git a/src/flask/blueprints.py b/src/flask/blueprints.py\n--- a/src/flask/blueprints.py\n+++ b/src/flask/blueprints.py\n@@ -188,6 +188,10 @@ def __init__(\n template_folder=template_folder,\n root_path=root_path,\n )\n+\n+ if \".\" in name:\n+ raise ValueError(\"'name' may not contain a dot '.' character.\")\n+\n self.name = name\n self.url_prefix = url_prefix\n self.subdomain = subdomain\n@@ -360,12 +364,12 @@ def add_url_rule(\n \"\"\"Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for\n the :func:`url_for` function is prefixed with the name of the blueprint.\n \"\"\"\n- if endpoint:\n- assert \".\" not in endpoint, \"Blueprint endpoints should not contain dots\"\n- if view_func and hasattr(view_func, \"__name__\"):\n- assert (\n- \".\" not in view_func.__name__\n- ), \"Blueprint view function name should not contain dots\"\n+ if endpoint and \".\" in endpoint:\n+ raise ValueError(\"'endpoint' may not contain a dot '.' character.\")\n+\n+ if view_func and hasattr(view_func, \"__name__\") and \".\" in view_func.__name__:\n+ raise ValueError(\"'view_func' name may not contain a dot '.' 
character.\")\n+\n self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n \n def app_template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n", + "test_patch": "diff --git a/tests/test_basic.py b/tests/test_basic.py\n--- a/tests/test_basic.py\n+++ b/tests/test_basic.py\n@@ -1631,7 +1631,7 @@ def something_else():\n \n \n def test_inject_blueprint_url_defaults(app):\n- bp = flask.Blueprint(\"foo.bar.baz\", __name__, template_folder=\"template\")\n+ bp = flask.Blueprint(\"foo\", __name__, template_folder=\"template\")\n \n @bp.url_defaults\n def bp_defaults(endpoint, values):\n@@ -1644,12 +1644,12 @@ def view(page):\n app.register_blueprint(bp)\n \n values = dict()\n- app.inject_url_defaults(\"foo.bar.baz.view\", values)\n+ app.inject_url_defaults(\"foo.view\", values)\n expected = dict(page=\"login\")\n assert values == expected\n \n with app.test_request_context(\"/somepage\"):\n- url = flask.url_for(\"foo.bar.baz.view\")\n+ url = flask.url_for(\"foo.view\")\n expected = \"/login\"\n assert url == expected\n \ndiff --git a/tests/test_blueprints.py b/tests/test_blueprints.py\n--- a/tests/test_blueprints.py\n+++ b/tests/test_blueprints.py\n@@ -1,5 +1,3 @@\n-import functools\n-\n import pytest\n from jinja2 import TemplateNotFound\n from werkzeug.http import parse_cache_control_header\n@@ -253,28 +251,9 @@ def test_templates_list(test_apps):\n assert templates == [\"admin/index.html\", \"frontend/index.html\"]\n \n \n-def test_dotted_names(app, client):\n- frontend = flask.Blueprint(\"myapp.frontend\", __name__)\n- backend = flask.Blueprint(\"myapp.backend\", __name__)\n-\n- @frontend.route(\"/fe\")\n- def frontend_index():\n- return flask.url_for(\"myapp.backend.backend_index\")\n-\n- @frontend.route(\"/fe2\")\n- def frontend_page2():\n- return flask.url_for(\".frontend_index\")\n-\n- @backend.route(\"/be\")\n- def backend_index():\n- return flask.url_for(\"myapp.frontend.frontend_index\")\n-\n- app.register_blueprint(frontend)\n- app.register_blueprint(backend)\n-\n- assert client.get(\"/fe\").data.strip() == b\"/be\"\n- assert client.get(\"/fe2\").data.strip() == b\"/fe\"\n- assert client.get(\"/be\").data.strip() == b\"/fe\"\n+def test_dotted_name_not_allowed(app, client):\n+ with pytest.raises(ValueError):\n+ flask.Blueprint(\"app.ui\", __name__)\n \n \n def test_dotted_names_from_app(app, client):\n@@ -343,62 +322,19 @@ def index():\n def test_route_decorator_custom_endpoint_with_dots(app, client):\n bp = flask.Blueprint(\"bp\", __name__)\n \n- @bp.route(\"/foo\")\n- def foo():\n- return flask.request.endpoint\n-\n- try:\n-\n- @bp.route(\"/bar\", endpoint=\"bar.bar\")\n- def foo_bar():\n- return flask.request.endpoint\n-\n- except AssertionError:\n- pass\n- else:\n- raise AssertionError(\"expected AssertionError not raised\")\n-\n- try:\n-\n- @bp.route(\"/bar/123\", endpoint=\"bar.123\")\n- def foo_bar_foo():\n- return flask.request.endpoint\n-\n- except AssertionError:\n- pass\n- else:\n- raise AssertionError(\"expected AssertionError not raised\")\n-\n- def foo_foo_foo():\n- pass\n-\n- pytest.raises(\n- AssertionError,\n- lambda: bp.add_url_rule(\"/bar/123\", endpoint=\"bar.123\", view_func=foo_foo_foo),\n- )\n-\n- pytest.raises(\n- AssertionError, bp.route(\"/bar/123\", endpoint=\"bar.123\"), lambda: None\n- )\n-\n- foo_foo_foo.__name__ = \"bar.123\"\n+ with pytest.raises(ValueError):\n+ bp.route(\"/\", endpoint=\"a.b\")(lambda: \"\")\n \n- pytest.raises(\n- AssertionError, lambda: bp.add_url_rule(\"/bar/123\", view_func=foo_foo_foo)\n- )\n+ with 
pytest.raises(ValueError):\n+ bp.add_url_rule(\"/\", endpoint=\"a.b\")\n \n- bp.add_url_rule(\n- \"/bar/456\", endpoint=\"foofoofoo\", view_func=functools.partial(foo_foo_foo)\n- )\n+ def view():\n+ return \"\"\n \n- app.register_blueprint(bp, url_prefix=\"/py\")\n+ view.__name__ = \"a.b\"\n \n- assert client.get(\"/py/foo\").data == b\"bp.foo\"\n- # The rule's didn't actually made it through\n- rv = client.get(\"/py/bar\")\n- assert rv.status_code == 404\n- rv = client.get(\"/py/bar/123\")\n- assert rv.status_code == 404\n+ with pytest.raises(ValueError):\n+ bp.add_url_rule(\"/\", view_func=view)\n \n \n def test_endpoint_decorator(app, client):\n", + "fail_to_pass": "[\"tests/test_blueprints.py::test_dotted_name_not_allowed\", \"tests/test_blueprints.py::test_route_decorator_custom_endpoint_with_dots\"]", + "pass_to_pass": "[\"tests/test_basic.py::test_method_route_no_methods\", \"tests/test_basic.py::test_disallow_string_for_allowed_methods\", \"tests/test_basic.py::test_error_handler_unknown_code\", \"tests/test_basic.py::test_request_locals\", \"tests/test_basic.py::test_exception_propagation\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[None-True-True-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[None-True-True-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[None-True-False-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[None-True-False-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[None-False-True-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[None-False-True-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[None-False-False-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[None-False-False-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[True-True-True-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[True-True-True-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[True-True-False-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[True-True-False-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[True-False-True-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[True-False-True-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[True-False-False-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[True-False-False-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[False-True-True-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[False-True-True-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[False-True-False-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[False-True-False-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[False-False-True-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[False-False-True-False]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[False-False-False-True]\", \"tests/test_basic.py::test_werkzeug_passthrough_errors[False-False-False-False]\", \"tests/test_basic.py::test_get_method_on_g\", \"tests/test_basic.py::test_g_iteration_protocol\", \"tests/test_basic.py::test_run_defaults\", \"tests/test_basic.py::test_run_server_port\", \"tests/test_basic.py::test_run_from_config[None-None-pocoo.org:8080-pocoo.org-8080]\", \"tests/test_basic.py::test_run_from_config[localhost-None-pocoo.org:8080-localhost-8080]\", \"tests/test_basic.py::test_run_from_config[None-80-pocoo.org:8080-pocoo.org-80]\", 
\"tests/test_basic.py::test_run_from_config[localhost-80-pocoo.org:8080-localhost-80]\", \"tests/test_basic.py::test_run_from_config[localhost-0-localhost:8080-localhost-0]\", \"tests/test_basic.py::test_run_from_config[None-None-localhost:8080-localhost-8080]\", \"tests/test_basic.py::test_run_from_config[None-None-localhost:0-localhost-0]\", \"tests/test_basic.py::test_app_freed_on_zero_refcount\", \"tests/test_blueprints.py::test_template_filter\", \"tests/test_blueprints.py::test_add_template_filter\", \"tests/test_blueprints.py::test_template_filter_with_name\", \"tests/test_blueprints.py::test_add_template_filter_with_name\", \"tests/test_blueprints.py::test_template_test\", \"tests/test_blueprints.py::test_add_template_test\", \"tests/test_blueprints.py::test_template_test_with_name\", \"tests/test_blueprints.py::test_add_template_test_with_name\", \"tests/test_blueprints.py::test_template_global\"]", + "expected_spans": { + "src/flask/blueprints.py": [ + "Blueprint.__init__", + "Blueprint.add_url_rule" + ] + }, + "test_file_spans": { + "tests/test_basic.py": [ + "test_inject_blueprint_url_defaults" + ], + "tests/test_blueprints.py": [ + "imports", + "test_dotted_names", + "test_route_decorator_custom_endpoint_with_dots" + ] + }, + "resolved_by": [], + "alternative_spans": [] + }, + { + "instance_id": "django__django-14411", + "repo": "django/django", + "base_commit": "fa4e963ee7e6876581b5432363603571839ba00c", + "problem_statement": "Label for ReadOnlyPasswordHashWidget points to non-labelable element.\nDescription\n\t \n\t\t(last modified by David Sanders)\n\t \nIn the admin, the label element for the ReadOnlyPasswordHashWidget widget has a 'for' attribute which points to a non-labelable element, since the widget just renders text, not an input. There's no labelable element for the widget, so the label shouldn't have a 'for' attribute.\n", + "golden_patch": "diff --git a/django/contrib/auth/forms.py b/django/contrib/auth/forms.py\n--- a/django/contrib/auth/forms.py\n+++ b/django/contrib/auth/forms.py\n@@ -50,6 +50,9 @@ def get_context(self, name, value, attrs):\n context['summary'] = summary\n return context\n \n+ def id_for_label(self, id_):\n+ return None\n+\n \n class ReadOnlyPasswordHashField(forms.Field):\n widget = ReadOnlyPasswordHashWidget\n", + "test_patch": "diff --git a/tests/auth_tests/test_forms.py b/tests/auth_tests/test_forms.py\n--- a/tests/auth_tests/test_forms.py\n+++ b/tests/auth_tests/test_forms.py\n@@ -13,6 +13,7 @@\n from django.core import mail\n from django.core.exceptions import ValidationError\n from django.core.mail import EmailMultiAlternatives\n+from django.forms import forms\n from django.forms.fields import CharField, Field, IntegerField\n from django.test import SimpleTestCase, TestCase, override_settings\n from django.utils import translation\n@@ -1025,6 +1026,18 @@ def test_readonly_field_has_changed(self):\n self.assertIs(field.disabled, True)\n self.assertFalse(field.has_changed('aaa', 'bbb'))\n \n+ def test_label(self):\n+ \"\"\"\n+ ReadOnlyPasswordHashWidget doesn't contain a for attribute in the\n+