From 70c3be88b97cbd7440ab7ebe9548c60ccd82635c Mon Sep 17 00:00:00 2001
From: InSync
Date: Mon, 13 Jan 2025 09:03:11 +0700
Subject: [PATCH] [`flake8-pie`] Reuse parsed tokens (`PIE800`) (#15438)

## Summary

Follow-up to #15394.

See [this review comment](https://github.com/astral-sh/ruff/pull/15394#discussion_r1910526741).

## Test Plan

`cargo nextest run` and `cargo insta test`.
---
 .../flake8_pie/rules/unnecessary_spread.rs    | 29 ++++++++++---------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_spread.rs b/crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_spread.rs
index 38ba2fc7a9736..efbcada3bda28 100644
--- a/crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_spread.rs
+++ b/crates/ruff_linter/src/rules/flake8_pie/rules/unnecessary_spread.rs
@@ -1,11 +1,10 @@
 use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
 use ruff_macros::{derive_message_formats, ViolationMetadata};
 use ruff_python_ast::{self as ast, Expr};
-use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
+use ruff_python_parser::{TokenKind, Tokens};
 use ruff_text_size::{Ranged, TextSize};
 
 use crate::checkers::ast::Checker;
-use crate::Locator;
 
 /// ## What it does
 /// Checks for unnecessary dictionary unpacking operators (`**`).
@@ -54,7 +53,7 @@ pub(crate) fn unnecessary_spread(checker: &mut Checker, dict: &ast::ExprDict) {
         // inside a dict.
         if let Expr::Dict(inner) = value {
             let mut diagnostic = Diagnostic::new(UnnecessarySpread, value.range());
-            if let Some(fix) = unnecessary_spread_fix(inner, prev_end, checker.locator()) {
+            if let Some(fix) = unnecessary_spread_fix(inner, prev_end, checker.tokens()) {
                 diagnostic.set_fix(fix);
             }
             checker.diagnostics.push(diagnostic);
@@ -68,24 +67,27 @@ pub(crate) fn unnecessary_spread(checker: &mut Checker, dict: &ast::ExprDict) {
 fn unnecessary_spread_fix(
     dict: &ast::ExprDict,
     prev_end: TextSize,
-    locator: &Locator,
+    tokens: &Tokens,
 ) -> Option<Fix> {
     // Find the `**` token preceding the spread.
-    let doublestar = SimpleTokenizer::starts_at(prev_end, locator.contents())
-        .find(|tok| matches!(tok.kind(), SimpleTokenKind::DoubleStar))?;
+    let doublestar = tokens
+        .after(prev_end)
+        .iter()
+        .find(|tok| matches!(tok.kind(), TokenKind::DoubleStar))?;
 
     if let Some(last) = dict.iter_values().last() {
         // Ex) `**{a: 1, b: 2}`
         let mut edits = vec![];
         let mut open_parens: u32 = 0;
 
-        for tok in SimpleTokenizer::starts_at(doublestar.end(), locator.contents()).skip_trivia() {
+        for tok in tokens.after(doublestar.end()) {
             match tok.kind() {
-                SimpleTokenKind::LParen => {
+                kind if kind.is_trivia() => {}
+                TokenKind::Lpar => {
                     edits.push(Edit::range_deletion(tok.range()));
                     open_parens += 1;
                 }
-                SimpleTokenKind::LBrace => {
+                TokenKind::Lbrace => {
                     edits.push(Edit::range_deletion(tok.range()));
                     break;
                 }
@@ -97,22 +99,23 @@ fn unnecessary_spread_fix(
         }
 
         let mut found_r_curly = false;
-        for tok in SimpleTokenizer::starts_at(last.end(), locator.contents()).skip_trivia() {
+        for tok in tokens.after(last.end()) {
             if found_r_curly && open_parens == 0 {
                 break;
             }
             match tok.kind() {
-                SimpleTokenKind::Comma => {
+                kind if kind.is_trivia() => {}
+                TokenKind::Comma => {
                     edits.push(Edit::range_deletion(tok.range()));
                 }
-                SimpleTokenKind::RParen => {
+                TokenKind::Rpar => {
                     if found_r_curly {
                         edits.push(Edit::range_deletion(tok.range()));
                         open_parens -= 1;
                     }
                 }
-                SimpleTokenKind::RBrace => {
+                TokenKind::Rbrace => {
                     edits.push(Edit::range_deletion(tok.range()));
                     found_r_curly = true;
                 }