From e9b726889218ebdbd5e5edd39fbfa5ada32c02d7 Mon Sep 17 00:00:00 2001 From: Andreas Reis <887320+twwn@users.noreply.github.com> Date: Fri, 27 Sep 2024 15:56:06 +0200 Subject: [PATCH] Anki: Replace lazy_static with once_cell Unify to once_cell, lazy_static's replacement. The latter is unmaintained. --- Cargo.lock | 7 +- build/ninja_gen/Cargo.toml | 2 +- build/ninja_gen/src/input.rs | 5 +- ftl/Cargo.toml | 2 +- ftl/src/garbage_collection.rs | 17 ++-- rslib/Cargo.toml | 1 - rslib/linkchecker/Cargo.toml | 2 +- rslib/linkchecker/tests/links.rs | 9 +- rslib/src/ankidroid/db.rs | 15 ++- rslib/src/ankihub/login.rs | 10 +- rslib/src/cloze.rs | 12 +-- rslib/src/import_export/text/csv/export.rs | 16 ++-- rslib/src/latex.rs | 20 ++-- rslib/src/lib.rs | 6 +- rslib/src/media/files.rs | 28 +++--- rslib/src/notetype/checks.rs | 6 +- rslib/src/notetype/mod.rs | 15 ++- rslib/src/scheduler/reviews.rs | 12 +-- rslib/src/search/parser.rs | 30 +++--- rslib/src/search/writer.rs | 7 +- rslib/src/template.rs | 12 +-- rslib/src/template_filters.rs | 6 +- rslib/src/text.rs | 101 +++++++++++---------- rslib/src/version.rs | 18 ++-- 24 files changed, 175 insertions(+), 184 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9643c9bfff2..4711ad5e7a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -134,7 +134,6 @@ dependencies = [ "id_tree", "inflections", "itertools 0.13.0", - "lazy_static", "nom", "num_cpus", "num_enum", @@ -1891,7 +1890,7 @@ dependencies = [ "clap", "fluent-syntax", "itertools 0.13.0", - "lazy_static", + "once_cell", "regex", "serde_json", "snafu", @@ -3167,8 +3166,8 @@ dependencies = [ "anki", "futures", "itertools 0.13.0", - "lazy_static", "linkcheck", + "once_cell", "regex", "reqwest 0.12.7", "strum 0.26.3", @@ -3609,9 +3608,9 @@ dependencies = [ "dunce", "globset", "itertools 0.13.0", - "lazy_static", "maplit", "num_cpus", + "once_cell", "walkdir", "which", ] diff --git a/build/ninja_gen/Cargo.toml b/build/ninja_gen/Cargo.toml index 408b187abc4..ea658697527 100644 ---
a/build/ninja_gen/Cargo.toml +++ b/build/ninja_gen/Cargo.toml @@ -14,8 +14,8 @@ camino.workspace = true dunce.workspace = true globset.workspace = true itertools.workspace = true -lazy_static.workspace = true maplit.workspace = true num_cpus.workspace = true +once_cell.workspace = true walkdir.workspace = true which.workspace = true diff --git a/build/ninja_gen/src/input.rs b/build/ninja_gen/src/input.rs index 00ae604838c..9e3d4ebfc4b 100644 --- a/build/ninja_gen/src/input.rs +++ b/build/ninja_gen/src/input.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; use std::fmt::Display; use camino::Utf8PathBuf; +use once_cell::sync::Lazy; #[derive(Debug, Clone, Hash, Default)] pub enum BuildInput { @@ -118,9 +119,7 @@ pub struct Glob { pub exclude: Option, } -lazy_static::lazy_static! { - static ref CACHED_FILES: Vec = cache_files(); -} +static CACHED_FILES: Lazy> = Lazy::new(|| cache_files()); /// Walking the source tree once instead of for each glob yields ~4x speed /// improvements. diff --git a/ftl/Cargo.toml b/ftl/Cargo.toml index 2bd9ff5e15e..677360f0ccb 100644 --- a/ftl/Cargo.toml +++ b/ftl/Cargo.toml @@ -16,7 +16,7 @@ camino.workspace = true clap.workspace = true fluent-syntax.workspace = true itertools.workspace = true -lazy_static.workspace = true +once_cell.workspace = true regex.workspace = true serde_json.workspace = true snafu.workspace = true diff --git a/ftl/src/garbage_collection.rs b/ftl/src/garbage_collection.rs index 3cbfbb4fa29..a6aee4cf1a2 100644 --- a/ftl/src/garbage_collection.rs +++ b/ftl/src/garbage_collection.rs @@ -14,7 +14,7 @@ use clap::Args; use fluent_syntax::ast; use fluent_syntax::ast::Resource; use fluent_syntax::parser; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Regex; use walkdir::DirEntry; use walkdir::WalkDir; @@ -144,9 +144,7 @@ fn extract_nested_messages_and_terms( ftl_roots: &[impl AsRef], used_ftls: &mut HashSet, ) { - lazy_static! 
{ - static ref REFERENCE: Regex = Regex::new(r"\{\s*-?([-0-9a-z]+)\s*\}").unwrap(); - } + static REFERENCE: Lazy = Lazy::new(|| Regex::new(r"\{\s*-?([-0-9a-z]+)\s*\}").unwrap()); for_files_with_ending(ftl_roots, ".ftl", |entry| { let source = fs::read_to_string(entry.path()).expect("file not readable"); for caps in REFERENCE.captures_iter(&source) { @@ -198,11 +196,12 @@ fn entry_use_check(used_ftls: &HashSet) -> impl Fn(&ast::Entry<&str>) -> } fn extract_references_from_file(refs: &mut HashSet, entry: &DirEntry) { - lazy_static! { - static ref SNAKECASE_TR: Regex = Regex::new(r"\Wtr\s*\.([0-9a-z_]+)\W").unwrap(); - static ref CAMELCASE_TR: Regex = Regex::new(r"\Wtr2?\.([0-9A-Za-z_]+)\W").unwrap(); - static ref DESIGNER_STYLE_TR: Regex = Regex::new(r"([0-9a-z_]+)").unwrap(); - } + static SNAKECASE_TR: Lazy = + Lazy::new(|| Regex::new(r"\Wtr\s*\.([0-9a-z_]+)\W").unwrap()); + static CAMELCASE_TR: Lazy = + Lazy::new(|| Regex::new(r"\Wtr2?\.([0-9A-Za-z_]+)\W").unwrap()); + static DESIGNER_STYLE_TR: Lazy = + Lazy::new(|| Regex::new(r"([0-9a-z_]+)").unwrap()); let file_name = entry.file_name().to_str().expect("non-unicode filename"); diff --git a/rslib/Cargo.toml b/rslib/Cargo.toml index 39f8393f7be..fb14826b4a7 100644 --- a/rslib/Cargo.toml +++ b/rslib/Cargo.toml @@ -69,7 +69,6 @@ htmlescape.workspace = true hyper.workspace = true id_tree.workspace = true itertools.workspace = true -lazy_static.workspace = true nom.workspace = true num_cpus.workspace = true num_enum.workspace = true diff --git a/rslib/linkchecker/Cargo.toml b/rslib/linkchecker/Cargo.toml index c9a5a9ebbf1..16923e0972f 100644 --- a/rslib/linkchecker/Cargo.toml +++ b/rslib/linkchecker/Cargo.toml @@ -11,8 +11,8 @@ rust-version.workspace = true anki.workspace = true futures.workspace = true itertools.workspace = true -lazy_static.workspace = true linkcheck.workspace = true +once_cell.workspace = true regex.workspace = true reqwest.workspace = true strum.workspace = true diff --git 
a/rslib/linkchecker/tests/links.rs b/rslib/linkchecker/tests/links.rs index 04482c99af0..28ab46ae7c6 100644 --- a/rslib/linkchecker/tests/links.rs +++ b/rslib/linkchecker/tests/links.rs @@ -13,7 +13,7 @@ use anki::links::help_page_to_link; use anki::links::HelpPage; use futures::StreamExt; use itertools::Itertools; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use linkcheck::validation::check_web; use linkcheck::validation::Context; use linkcheck::validation::Reason; @@ -70,9 +70,10 @@ impl From<&'static str> for CheckableUrl { } fn ts_help_pages() -> impl Iterator { - lazy_static! { - static ref QUOTED_URL: Regex = Regex::new("\"(http.+)\"").unwrap(); - } + static QUOTED_URL: Lazy = Lazy::new(|| { + Regex::new("\"(http.+)\"").unwrap() + }); + QUOTED_URL .captures_iter(include_str!("../../../ts/lib/tslib/help-page.ts")) .map(|caps| caps.get(1).unwrap().as_str()) diff --git a/rslib/src/ankidroid/db.rs b/rslib/src/ankidroid/db.rs index 32b069ef8a1..3cc5fe7e4b8 100644 --- a/rslib/src/ankidroid/db.rs +++ b/rslib/src/ankidroid/db.rs @@ -16,7 +16,7 @@ use itertools::FoldWhile; use itertools::FoldWhile::Continue; use itertools::FoldWhile::Done; use itertools::Itertools; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use rusqlite::ToSql; use serde::Deserialize; @@ -110,10 +110,8 @@ fn select_slice_of_size<'a>( type SequenceNumber = i32; -lazy_static! { - static ref HASHMAP: Mutex>> = - Mutex::new(HashMap::new()); -} +static HASHMAP: Lazy>>> = + Lazy::new(|| Mutex::new(HashMap::new())); pub(crate) fn flush_single_result(col: &Collection, sequence_number: i32) { HASHMAP @@ -244,10 +242,9 @@ pub(crate) fn next_sequence_number() -> i32 { SEQUENCE_NUMBER.fetch_add(1, Ordering::SeqCst) } -lazy_static! 
{ - // same as we get from io.requery.android.database.CursorWindow.sCursorWindowSize - static ref DB_COMMAND_PAGE_SIZE: Mutex = Mutex::new(1024 * 1024 * 2); -} +// same as we get from +// io.requery.android.database.CursorWindow.sCursorWindowSize +static DB_COMMAND_PAGE_SIZE: Lazy> = Lazy::new(|| Mutex::new(1024 * 1024 * 2)); pub(crate) fn set_max_page_size(size: usize) { let mut state = DB_COMMAND_PAGE_SIZE.lock().expect("Could not lock mutex"); diff --git a/rslib/src/ankihub/login.rs b/rslib/src/ankihub/login.rs index 42aacec596b..a77e17a95cb 100644 --- a/rslib/src/ankihub/login.rs +++ b/rslib/src/ankihub/login.rs @@ -1,7 +1,7 @@ // Copyright: Ankitects Pty Ltd and contributors // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Regex; use reqwest::Client; use serde; @@ -31,11 +31,9 @@ pub async fn ankihub_login>( client: Client, ) -> Result { let client = HttpAnkiHubClient::new("", client); - lazy_static! 
{ - static ref EMAIL_RE: Regex = - Regex::new(r"^[a-zA-Z0-9.!#$%&’*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$") - .unwrap(); - } + static EMAIL_RE: Lazy = Lazy::new(|| { + Regex::new(r"^[a-zA-Z0-9.!#$%&’*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*$").unwrap() + }); let mut request = LoginRequest { username: None, email: None, diff --git a/rslib/src/cloze.rs b/rslib/src/cloze.rs index 1fb97ed8501..f82e0f82332 100644 --- a/rslib/src/cloze.rs +++ b/rslib/src/cloze.rs @@ -9,12 +9,12 @@ use std::fmt::Write; use anki_proto::image_occlusion::get_image_occlusion_note_response::ImageOcclusion; use anki_proto::image_occlusion::get_image_occlusion_note_response::ImageOcclusionShape; use htmlescape::encode_attribute; -use lazy_static::lazy_static; use nom::branch::alt; use nom::bytes::complete::tag; use nom::bytes::complete::take_while; use nom::combinator::map; use nom::IResult; +use once_cell::sync::Lazy; use regex::Captures; use regex::Regex; @@ -24,16 +24,16 @@ use crate::latex::contains_latex; use crate::template::RenderContext; use crate::text::strip_html_preserving_entities; -lazy_static! { - static ref MATHJAX: Regex = Regex::new( +static MATHJAX: Lazy = Lazy::new(|| { + Regex::new( r"(?xsi) (\\[(\[]) # 1 = mathjax opening tag (.*?) 
# 2 = inner content (\\[])]) # 3 = mathjax closing tag - " + ", ) - .unwrap(); -} + .unwrap() +}); mod mathjax_caps { pub const OPENING_TAG: usize = 1; diff --git a/rslib/src/import_export/text/csv/export.rs b/rslib/src/import_export/text/csv/export.rs index 0f1e712e81e..6fa14ab9461 100644 --- a/rslib/src/import_export/text/csv/export.rs +++ b/rslib/src/import_export/text/csv/export.rs @@ -9,7 +9,7 @@ use std::sync::Arc; use anki_proto::import_export::ExportNoteCsvRequest; use itertools::Itertools; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Regex; use super::metadata::Delimiter; @@ -156,23 +156,21 @@ fn field_to_record_field(field: &str, with_html: bool) -> Cow { } fn strip_redundant_sections(text: &str) -> Cow { - lazy_static! { - static ref RE: Regex = Regex::new( + static RE: Lazy = Lazy::new(|| { + Regex::new( r"(?isx) # style elements | \[\[type:[^]]+\]\] # type replacements - " + ", ) - .unwrap(); - } + .unwrap() + }); RE.replace_all(text.as_ref(), "") } fn strip_answer_side_question(text: &str) -> Cow { - lazy_static! { - static ref RE: Regex = Regex::new(r"(?is)^.*
\n*").unwrap(); - } + static RE: Lazy = Lazy::new(|| Regex::new(r"(?is)^.*
\n*").unwrap()); RE.replace_all(text.as_ref(), "") } diff --git a/rslib/src/latex.rs b/rslib/src/latex.rs index f6a03c107fa..d164b3cfd3d 100644 --- a/rslib/src/latex.rs +++ b/rslib/src/latex.rs @@ -3,7 +3,7 @@ use std::borrow::Cow; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Captures; use regex::Regex; @@ -11,26 +11,28 @@ use crate::cloze::expand_clozes_to_reveal_latex; use crate::media::files::sha1_of_data; use crate::text::strip_html; -lazy_static! { - pub(crate) static ref LATEX: Regex = Regex::new( +pub(crate) static LATEX: Lazy = Lazy::new(|| { + Regex::new( r"(?xsi) \[latex\](.+?)\[/latex\] # 1 - standard latex | \[\$\](.+?)\[/\$\] # 2 - inline math | \[\$\$\](.+?)\[/\$\$\] # 3 - math environment - " + ", ) - .unwrap(); - static ref LATEX_NEWLINES: Regex = Regex::new( + .unwrap() +}); +static LATEX_NEWLINES: Lazy = Lazy::new(|| { + Regex::new( r#"(?xi) |
- "# + "#, ) - .unwrap(); -} + .unwrap() +}); pub(crate) fn contains_latex(text: &str) -> bool { LATEX.is_match(text) diff --git a/rslib/src/lib.rs b/rslib/src/lib.rs index 8d3251f49dc..8b73877064d 100644 --- a/rslib/src/lib.rs +++ b/rslib/src/lib.rs @@ -53,8 +53,6 @@ pub mod version; use std::env; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; -lazy_static! { - pub(crate) static ref PYTHON_UNIT_TESTS: bool = env::var("ANKI_TEST_MODE").is_ok(); -} +pub(crate) static PYTHON_UNIT_TESTS: Lazy = Lazy::new(|| env::var("ANKI_TEST_MODE").is_ok()); diff --git a/rslib/src/media/files.rs b/rslib/src/media/files.rs index d60e70791e6..6f0a6b2fb6d 100644 --- a/rslib/src/media/files.rs +++ b/rslib/src/media/files.rs @@ -15,7 +15,7 @@ use anki_io::write_file; use anki_io::FileIoError; use anki_io::FileIoSnafu; use anki_io::FileOp; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Regex; use sha1::Digest; use sha1::Sha1; @@ -27,8 +27,8 @@ use unicode_normalization::UnicodeNormalization; use crate::prelude::*; use crate::sync::media::MAX_MEDIA_FILENAME_LENGTH; -lazy_static! { - static ref WINDOWS_DEVICE_NAME: Regex = Regex::new( +static WINDOWS_DEVICE_NAME: Lazy = Lazy::new(|| { + Regex::new( r"(?xi) # starting with one of the following names ^ @@ -39,30 +39,34 @@ lazy_static! { ( \. | $ ) - " + ", ) - .unwrap(); - static ref WINDOWS_TRAILING_CHAR: Regex = Regex::new( + .unwrap() +}); +static WINDOWS_TRAILING_CHAR: Lazy = Lazy::new(|| { + Regex::new( r"(?x) # filenames can't end with a space or period ( \x20 | \. ) $ - " + ", ) - .unwrap(); - pub(crate) static ref NONSYNCABLE_FILENAME: Regex = Regex::new( + .unwrap() +}); +pub(crate) static NONSYNCABLE_FILENAME: Lazy = Lazy::new(|| { + Regex::new( r#"(?xi) ^ (:? thumbs.db | .ds_store ) $ - "# + "#, ) - .unwrap(); -} + .unwrap() +}); /// True if character may cause problems on one or more platforms. 
fn disallowed_char(char: char) -> bool { diff --git a/rslib/src/notetype/checks.rs b/rslib/src/notetype/checks.rs index 1cd6320dc89..6ecd1d73a29 100644 --- a/rslib/src/notetype/checks.rs +++ b/rslib/src/notetype/checks.rs @@ -6,7 +6,7 @@ use std::fmt::Write; use std::ops::Deref; use anki_i18n::without_unicode_isolation; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Captures; use regex::Match; use regex::Regex; @@ -24,9 +24,7 @@ struct Template<'a> { front: bool, } -lazy_static! { - static ref FIELD_REPLACEMENT: Regex = Regex::new(r"\{\{.+\}\}").unwrap(); -} +static FIELD_REPLACEMENT: Lazy = Lazy::new(|| Regex::new(r"\{\{.+\}\}").unwrap()); impl Collection { pub fn report_media_field_referencing_templates(&mut self, buf: &mut String) -> Result<()> { diff --git a/rslib/src/notetype/mod.rs b/rslib/src/notetype/mod.rs index 52489e618f1..d69a139152e 100644 --- a/rslib/src/notetype/mod.rs +++ b/rslib/src/notetype/mod.rs @@ -33,9 +33,9 @@ pub use anki_proto::notetypes::Notetype as NotetypeProto; pub(crate) use cardgen::AlreadyGeneratedCardInfo; pub(crate) use cardgen::CardGenContext; pub use fields::NoteField; -use lazy_static::lazy_static; pub use notetypechange::ChangeNotetypeInput; pub use notetypechange::NotetypeChangeInfo; +use once_cell::sync::Lazy; use regex::Regex; pub(crate) use render::RenderCardOutput; pub use schema11::CardTemplateSchema11; @@ -67,9 +67,8 @@ pub(crate) const DEFAULT_CSS: &str = include_str!("styling.css"); pub(crate) const DEFAULT_CLOZE_CSS: &str = include_str!("cloze_styling.css"); pub(crate) const DEFAULT_LATEX_HEADER: &str = include_str!("header.tex"); pub(crate) const DEFAULT_LATEX_FOOTER: &str = r"\end{document}"; -lazy_static! { - /// New entries must be handled in render.rs/add_special_fields(). - static ref SPECIAL_FIELDS: HashSet<&'static str> = HashSet::from_iter(vec![ +static SPECIAL_FIELDS: Lazy> = Lazy::new(|| { + HashSet::from_iter(vec![ "FrontSide", "Card", "CardFlag", @@ -77,8 +76,8 @@ lazy_static! 
{ "Subdeck", "Tags", "Type", - ]); -} + ]) +}); #[derive(Debug, PartialEq, Clone)] pub struct Notetype { @@ -365,9 +364,7 @@ impl Notetype { } fn ensure_template_fronts_unique(&self) -> Result<(), CardTypeError> { - lazy_static! { - static ref CARD_TAG: Regex = Regex::new(r"\{\{\s*Card\s*\}\}").unwrap(); - } + static CARD_TAG: Lazy = Lazy::new(|| Regex::new(r"\{\{\s*Card\s*\}\}").unwrap()); let mut map = HashMap::new(); for (index, card) in self.templates.iter().enumerate() { diff --git a/rslib/src/scheduler/reviews.rs b/rslib/src/scheduler/reviews.rs index 2fac6eab576..2bfd9ce06fb 100644 --- a/rslib/src/scheduler/reviews.rs +++ b/rslib/src/scheduler/reviews.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use rand::distributions::Distribution; use rand::distributions::Uniform; use regex::Regex; @@ -65,8 +65,8 @@ pub struct DueDateSpecifier { } pub fn parse_due_date_str(s: &str) -> Result { - lazy_static! { - static ref RE: Regex = Regex::new( + static RE: Lazy = Lazy::new(|| { + Regex::new( r"(?x)^ # a number (?P\d+) @@ -78,10 +78,10 @@ pub fn parse_due_date_str(s: &str) -> Result { # optional exclamation mark (?P!)? 
$ - " + ", ) - .unwrap(); - } + .unwrap() + }); let caps = RE.captures(s).or_invalid(s)?; let min: u32 = caps.name("min").unwrap().as_str().parse()?; let max = if let Some(max) = caps.name("max") { diff --git a/rslib/src/search/parser.rs b/rslib/src/search/parser.rs index 8af7991b3f1..019e1a892a3 100644 --- a/rslib/src/search/parser.rs +++ b/rslib/src/search/parser.rs @@ -1,7 +1,6 @@ // Copyright: Ankitects Pty Ltd and contributors // License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html -use lazy_static::lazy_static; use nom::branch::alt; use nom::bytes::complete::escaped; use nom::bytes::complete::is_not; @@ -18,6 +17,7 @@ use nom::error::ErrorKind as NomErrorKind; use nom::multi::many0; use nom::sequence::preceded; use nom::sequence::separated_pair; +use once_cell::sync::Lazy; use regex::Captures; use regex::Regex; @@ -621,9 +621,7 @@ fn parse_mid(s: &str) -> ParseResult { /// ensure a list of ids contains only numbers and commas, returning unchanged /// if true used by nid: and cid: fn check_id_list<'a>(s: &'a str, context: &str) -> ParseResult<'a, &'a str> { - lazy_static! { - static ref RE: Regex = Regex::new(r"^(\d+,)*\d+$").unwrap(); - } + static RE: Lazy = Lazy::new(|| Regex::new(r"^(\d+,)*\d+$").unwrap()); if RE.is_match(s) { Ok(s) } else { @@ -700,9 +698,7 @@ fn unescape(txt: &str) -> ParseResult { )) } else { Ok(if is_parser_escape(txt) { - lazy_static! { - static ref RE: Regex = Regex::new(r#"\\[\\":()-]"#).unwrap(); - } + static RE: Lazy = Lazy::new(|| Regex::new(r#"\\[\\":()-]"#).unwrap()); RE.replace_all(txt, |caps: &Captures| match &caps[0] { r"\\" => r"\\", "\\\"" => "\"", @@ -722,17 +718,17 @@ fn unescape(txt: &str) -> ParseResult { /// Return invalid escape sequence if any. fn invalid_escape_sequence(txt: &str) -> Option { // odd number of \s not followed by an escapable character - lazy_static! 
{ - static ref RE: Regex = Regex::new( + static RE: Lazy = Lazy::new(|| { + Regex::new( r#"(?x) (?:^|[^\\]) # not a backslash (?:\\\\)* # even number of backslashes (\\ # single backslash (?:[^\\":*_()-]|$)) # anything but an escapable char - "# + "#, ) - .unwrap(); - } + .unwrap() + }); let caps = RE.captures(txt)?; Some(caps[1].to_string()) @@ -741,17 +737,17 @@ fn invalid_escape_sequence(txt: &str) -> Option { /// Check string for escape sequences handled by the parser: ":()- fn is_parser_escape(txt: &str) -> bool { // odd number of \s followed by a char with special meaning to the parser - lazy_static! { - static ref RE: Regex = Regex::new( + static RE: Lazy = Lazy::new(|| { + Regex::new( r#"(?x) (?:^|[^\\]) # not a backslash (?:\\\\)* # even number of backslashes \\ # single backslash [":()-] # parser escape - "# + "#, ) - .unwrap(); - } + .unwrap() + }); RE.is_match(txt) } diff --git a/rslib/src/search/writer.rs b/rslib/src/search/writer.rs index 181ecf9d96f..5567545b95e 100644 --- a/rslib/src/search/writer.rs +++ b/rslib/src/search/writer.rs @@ -3,7 +3,7 @@ use std::mem; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Regex; use crate::notetype::NotetypeId as NotetypeIdType; @@ -109,9 +109,8 @@ fn maybe_quote(txt: &str) -> String { /// Checks for the reserved keywords "and" and "or", a prepended hyphen, /// whitespace and brackets. fn needs_quotation(txt: &str) -> bool { - lazy_static! 
{ - static ref RE: Regex = Regex::new("(?i)^and$|^or$|^-.| |\u{3000}|\\(|\\)").unwrap(); - } + static RE: Lazy = + Lazy::new(|| Regex::new("(?i)^and$|^or$|^-.| |\u{3000}|\\(|\\)").unwrap()); RE.is_match(txt) } diff --git a/rslib/src/template.rs b/rslib/src/template.rs index 3e09af6ad6a..13b70616b36 100644 --- a/rslib/src/template.rs +++ b/rslib/src/template.rs @@ -8,7 +8,6 @@ use std::fmt::Write; use std::iter; use anki_i18n::I18n; -use lazy_static::lazy_static; use nom::branch::alt; use nom::bytes::complete::tag; use nom::bytes::complete::take_until; @@ -16,6 +15,7 @@ use nom::combinator::map; use nom::combinator::rest; use nom::combinator::verify; use nom::sequence::delimited; +use once_cell::sync::Lazy; use regex::Regex; use crate::cloze::add_cloze_numbers_in_string; @@ -546,18 +546,18 @@ fn append_str_to_nodes(nodes: &mut Vec, text: &str) { /// True if provided text contains only whitespace and/or empty BR/DIV tags. pub(crate) fn field_is_empty(text: &str) -> bool { - lazy_static! { - static ref RE: Regex = Regex::new( + static RE: Lazy = Lazy::new(|| { + Regex::new( r"(?xsi) ^(?: [[:space:]] | )*$ - " + ", ) - .unwrap(); - } + .unwrap() + }); RE.is_match(text) } diff --git a/rslib/src/template_filters.rs b/rslib/src/template_filters.rs index b6408d965a4..f1246f7a64d 100644 --- a/rslib/src/template_filters.rs +++ b/rslib/src/template_filters.rs @@ -4,7 +4,7 @@ use std::borrow::Cow; use blake3::Hasher; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use regex::Captures; use regex::Regex; @@ -107,9 +107,7 @@ fn apply_filter( // Ruby filters //---------------------------------------- -lazy_static! { - static ref FURIGANA: Regex = Regex::new(r" ?([^ >]+?)\[(.+?)\]").unwrap(); -} +static FURIGANA: Lazy = Lazy::new(|| Regex::new(r" ?([^ >]+?)\[(.+?)\]").unwrap()); /// Did furigana regex match a sound tag? 
fn captured_sound(caps: &Captures) -> bool { diff --git a/rslib/src/text.rs b/rslib/src/text.rs index b32ef45c1f8..c2f6103d783 100644 --- a/rslib/src/text.rs +++ b/rslib/src/text.rs @@ -3,7 +3,7 @@ use std::borrow::Cow; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; use percent_encoding_iri::percent_decode_str; use percent_encoding_iri::utf8_percent_encode; use percent_encoding_iri::AsciiSet; @@ -78,17 +78,18 @@ pub enum AvTag { }, } -lazy_static! { - static ref HTML: Regex = Regex::new(concat!( +static HTML: Lazy = Lazy::new(|| { + Regex::new(concat!( "(?si)", // wrapped text r"()|(.*?)|(.*?)", // html tags r"|(<.*?>)", )) - .unwrap(); - - static ref HTML_LINEBREAK_TAGS: Regex = Regex::new( + .unwrap() +}); +static HTML_LINEBREAK_TAGS: Lazy = Lazy::new(|| { + Regex::new( r#"(?xsi) - "# - ).unwrap(); + "#, + ) + .unwrap() +}); - pub static ref HTML_MEDIA_TAGS: Regex = Regex::new( +pub static HTML_MEDIA_TAGS: Lazy = Lazy::new(|| { + Regex::new( r#"(?xsi) # the start of the image, audio, or object tag <\b(?:img|audio|video|object)\b @@ -140,11 +144,14 @@ lazy_static! { > ) ) - "# - ).unwrap(); - - // videos are also in sound tags - static ref AV_TAGS: Regex = Regex::new( + "#, + ) + .unwrap() +}); + +// videos are also in sound tags +static AV_TAGS: Lazy = Lazy::new(|| { + Regex::new( r"(?xs) \[sound:(.+?)\] # 1 - the filename in a sound tag | @@ -152,15 +159,20 @@ lazy_static! { \[(.*?)\] # 2 - arguments to tts call (.*?) # 3 - field text \[/anki:tts\] - ").unwrap(); + ", + ) + .unwrap() +}); - static ref PERSISTENT_HTML_SPACERS: Regex = Regex::new(r"(?i)|
|\n").unwrap(); +static PERSISTENT_HTML_SPACERS: Lazy = + Lazy::new(|| Regex::new(r"(?i)|
|\n").unwrap()); - static ref TYPE_TAG: Regex = Regex::new(r"\[\[type:[^]]+\]\]").unwrap(); - pub(crate) static ref SOUND_TAG: Regex = Regex::new(r"\[sound:([^]]+)\]").unwrap(); +static TYPE_TAG: Lazy = Lazy::new(|| Regex::new(r"\[\[type:[^]]+\]\]").unwrap()); +pub(crate) static SOUND_TAG: Lazy = Lazy::new(|| Regex::new(r"\[sound:([^]]+)\]").unwrap()); - /// Files included in CSS with a leading underscore. - static ref UNDERSCORED_CSS_IMPORTS: Regex = Regex::new( +/// Files included in CSS with a leading underscore. +static UNDERSCORED_CSS_IMPORTS: Lazy = Lazy::new(|| { + Regex::new( r#"(?xi) (?:@import\s+ # import statement with a bare "(_[^"]*.css)" # double quoted @@ -175,10 +187,14 @@ lazy_static! { | # or (_.+) # unquoted filename \s*\)) - "#).unwrap(); - - /// Strings, src and data attributes with a leading underscore. - static ref UNDERSCORED_REFERENCES: Regex = Regex::new( + "#, + ) + .unwrap() +}); + +/// Strings, src and data attributes with a leading underscore. +static UNDERSCORED_REFERENCES: Lazy = Lazy::new(|| { + Regex::new( r#"(?x) \[sound:(_[^]]+)\] # a filename in an Anki sound tag | # or @@ -189,8 +205,10 @@ lazy_static! { \b(?:src|data) # a 'src' or 'data' attribute = # followed by (_[^ >]+) # an unquoted value - "#).unwrap(); -} + "#, + ) + .unwrap() +}); pub fn is_html(text: impl AsRef) -> bool { HTML.is_match(text.as_ref()) @@ -439,16 +457,16 @@ pub(crate) fn without_combining(s: &str) -> Cow { /// Check if string contains an unescaped wildcard. pub(crate) fn is_glob(txt: &str) -> bool { // even number of \s followed by a wildcard - lazy_static! { - static ref RE: Regex = Regex::new( + static RE: Lazy = Lazy::new(|| { + Regex::new( r"(?x) (?:^|[^\\]) # not a backslash (?:\\\\)* # even number of backslashes [*_] # wildcard - " + ", ) - .unwrap(); - } + .unwrap() + }); RE.is_match(txt) } @@ -460,9 +478,7 @@ pub(crate) fn to_re(txt: &str) -> Cow { /// Convert Anki style to RegEx using the provided wildcard. 
pub(crate) fn to_custom_re<'a>(txt: &'a str, wildcard: &str) -> Cow<'a, str> { - lazy_static! { - static ref RE: Regex = Regex::new(r"\\?.").unwrap(); - } + static RE: Lazy = Lazy::new(|| Regex::new(r"\\?.").unwrap()); RE.replace_all(txt, |caps: &Captures| { let s = &caps[0]; match s { @@ -478,9 +494,7 @@ pub(crate) fn to_custom_re<'a>(txt: &'a str, wildcard: &str) -> Cow<'a, str> { /// Convert to SQL respecting Anki wildcards. pub(crate) fn to_sql(txt: &str) -> Cow { // escape sequences and unescaped special characters which need conversion - lazy_static! { - static ref RE: Regex = Regex::new(r"\\[\\*]|[*%]").unwrap(); - } + static RE: Lazy = Lazy::new(|| Regex::new(r"\\[\\*]|[*%]").unwrap()); RE.replace_all(txt, |caps: &Captures| { let s = &caps[0]; match s { @@ -495,17 +509,13 @@ pub(crate) fn to_sql(txt: &str) -> Cow { /// Unescape everything. pub(crate) fn to_text(txt: &str) -> Cow { - lazy_static! { - static ref RE: Regex = Regex::new(r"\\(.)").unwrap(); - } + static RE: Lazy = Lazy::new(|| Regex::new(r"\\(.)").unwrap()); RE.replace_all(txt, "$1") } /// Escape Anki wildcards and the backslash for escaping them: \*_ pub(crate) fn escape_anki_wildcards(txt: &str) -> String { - lazy_static! { - static ref RE: Regex = Regex::new(r"[\\*_]").unwrap(); - } + static RE: Lazy = Lazy::new(|| Regex::new(r"[\\*_]").unwrap()); RE.replace_all(txt, r"\$0").into() } @@ -538,9 +548,8 @@ pub(crate) fn glob_matcher(search: &str) -> impl Fn(&str) -> bool + '_ { } } -lazy_static! 
{ - pub(crate) static ref REMOTE_FILENAME: Regex = Regex::new("(?i)^https?://").unwrap(); -} +pub(crate) static REMOTE_FILENAME: Lazy = + Lazy::new(|| Regex::new("(?i)^https?://").unwrap()); /// https://url.spec.whatwg.org/#fragment-percent-encode-set const FRAGMENT_QUERY_UNION: &AsciiSet = &CONTROLS diff --git a/rslib/src/version.rs b/rslib/src/version.rs index dfe7cab4f24..ca05b95964a 100644 --- a/rslib/src/version.rs +++ b/rslib/src/version.rs @@ -3,7 +3,7 @@ use std::env; -use lazy_static::lazy_static; +use once_cell::sync::Lazy; pub fn version() -> &'static str { include_str!("../../.version").trim() @@ -14,25 +14,25 @@ pub fn buildhash() -> &'static str { } pub(crate) fn sync_client_version() -> &'static str { - lazy_static! { - static ref VER: String = format!( + static VER: Lazy = Lazy::new(|| { + format!( "anki,{version} ({buildhash}),{platform}", version = version(), buildhash = buildhash(), platform = env::var("PLATFORM").unwrap_or_else(|_| env::consts::OS.to_string()) - ); - } + ) + }); &VER } pub(crate) fn sync_client_version_short() -> &'static str { - lazy_static! { - static ref VER: String = format!( + static VER: Lazy = Lazy::new(|| { + format!( "{version},{buildhash},{platform}", version = version(), buildhash = buildhash(), platform = env::consts::OS - ); - } + ) + }); &VER }