From 0a8a74a28fc29a9709abb6c771a70db6664a0656 Mon Sep 17 00:00:00 2001 From: scarf Date: Sat, 2 Sep 2023 08:11:34 +0900 Subject: [PATCH] refactor: convert to f-strings (#3107) used flynt. see: https://github.com/ikamensh/flynt --- lang/concat_pot_files.py | 6 +- lang/extract_json_strings.py | 144 +++++++++++----------- lang/unicode_check.py | 4 +- tools/check_po_printf_format.py | 2 +- tools/copy_from.py | 6 +- tools/copy_mac_libs.py | 10 +- tools/dialogue_validator.py | 14 +-- tools/fix-compilation-database.py | 2 +- tools/gfx_tools/decompose.py | 26 ++-- tools/gfx_tools/png_update.py | 20 +-- tools/json_tools/cddatags.py | 7 +- tools/json_tools/keys.py | 4 +- tools/json_tools/util.py | 6 +- tools/json_tools/values.py | 6 +- tools/merge_maps.py | 23 ++-- tools/palettize.py | 8 +- tools/update_blueprint_needs.py | 8 +- utilities/building-utility/deconstruct.py | 2 +- 18 files changed, 148 insertions(+), 150 deletions(-) diff --git a/lang/concat_pot_files.py b/lang/concat_pot_files.py index 71c3f830e103..837ad5af914e 100755 --- a/lang/concat_pot_files.py +++ b/lang/concat_pot_files.py @@ -18,13 +18,13 @@ destination_file = args[2] if not os.path.isfile(source_file_1): - print("Error: Couldn't find file '{}'.".format(source_file_1)) + print(f"Error: Couldn't find file '{source_file_1}'.") exit(1) if not os.path.isfile(source_file_2): - print("Error: Couldn't find file '{}'.".format(source_file_2)) + print(f"Error: Couldn't find file '{source_file_2}'.") exit(1) -print("==> Merging '{}' and '{}' into '{}".format(source_file_1, source_file_2, destination_file)) +print(f"==> Merging '{source_file_1}' and '{source_file_2}' into '{destination_file}'") pot1 = polib.pofile(source_file_1) pot2 = polib.pofile(source_file_2) diff --git a/lang/extract_json_strings.py b/lang/extract_json_strings.py index d9cc0c93381d..7cd9d7e4e795 100755 --- a/lang/extract_json_strings.py +++ b/lang/extract_json_strings.py @@ -62,7 +62,7 @@ def __init__(self, msg, item): self.item = item
def __str__(self): - return ("---\nJSON error\n{0}\n--- JSON Item:\n{1}\n---".format(self.msg, self.item)) + return (f"---\nJSON error\n{self.msg}\n--- JSON Item:\n{self.item}\n---") git_files_list = {os.path.normpath(i) for i in { ".", @@ -222,7 +222,7 @@ def warning_supressed(filename): all_genders = ["f", "m", "n"] def gender_options(subject): - return [subject + ":" + g for g in all_genders] + return [f"{subject}:{g}" for g in all_genders] ## ## SPECIALIZED EXTRACTION FUNCTIONS @@ -276,7 +276,7 @@ def extract_material(state, item): writestr(state, item["dmg_adj"][3]) wrote = True if not wrote and not "copy-from" in item: - print("WARNING: {}: no mandatory field in item: {}".format(state.current_source_file, item)) + print(f"WARNING: {state.current_source_file}: no mandatory field in item: {item}") def extract_martial_art(state, item): @@ -287,10 +287,10 @@ def extract_martial_art(state, item): name = item["id"] if "description" in item: writestr(state, item["description"], - comment="Description for martial art '{}'".format(name)) + comment=f"Description for martial art '{name}'") if "initiate" in item: writestr(state, item["initiate"], format_strings=True, - comment="Initiate message for martial art '{}'".format(name)) + comment=f"Initiate message for martial art '{name}'") onhit_buffs = item.get("onhit_buffs", list()) static_buffs = item.get("static_buffs", list()) onmove_buffs = item.get("onmove_buffs", list()) @@ -299,9 +299,9 @@ def extract_martial_art(state, item): for buff in buffs: writestr(state, buff["name"]) if buff["name"] == item["name"]: - c="Description of buff for martial art '{}'".format(name) + c=f"Description of buff for martial art '{name}'" else: - c="Description of buff '{}' for martial art '{}'".format(buff["name"], name) + c=f"Description of buff '{buff['name']}' for martial art '{name}'" writestr(state, buff["description"], comment=c) @@ -314,7 +314,7 @@ def extract_effect_type(state, item): for nm_desc in zip(ctxt_name, 
item.get("desc", ())): writestr(state, nm_desc[0]) writestr(state, nm_desc[1], format_strings=True, - comment="Description of effect '{}'.".format(nm_desc[0])) + comment=f"Description of effect '{nm_desc[0]}'.") else: for i in ctxt_name: writestr(state, i) @@ -331,7 +331,7 @@ def extract_effect_type(state, item): writestr(state, msg, format_strings=True) else: writestr(state, msg, format_strings=True, - comment="Apply message for effect(s) '{}'.".format(', '.join(name))) + comment=f"Apply message for effect(s) '{', '.join(name)}'.") # remove_message msg = item.get("remove_message") @@ -339,7 +339,7 @@ def extract_effect_type(state, item): writestr(state, msg, format_strings=True) else: writestr(state, msg, format_strings=True, - comment="Remove message for effect(s) '{}'.".format(', '.join(name))) + comment=f"Remove message for effect(s) '{', '.join(name)}'.") # miss messages msg = item.get("miss_messages", ()) @@ -349,7 +349,7 @@ def extract_effect_type(state, item): else: for m in msg: writestr(state, m[0], - comment="Miss message for effect(s) '{}'.".format(', '.join(name))) + comment=f"Miss message for effect(s) '{', '.join(name)}'.") msg = item.get("decay_messages", ()) if not name: for m in msg: @@ -357,14 +357,14 @@ def extract_effect_type(state, item): else: for m in msg: writestr(state, m[0], - comment="Decay message for effect(s) '{}'.".format(', '.join(name))) + comment=f"Decay message for effect(s) '{', '.join(name)}'.") # speed_name if "speed_name" in item: if not name: writestr(state, item.get("speed_name")) else: - writestr(state, item.get("speed_name"), comment="Speed name of effect(s) '{}'.".format(', '.join(name))) + writestr(state, item.get("speed_name"), comment=f"Speed name of effect(s) '{', '.join(name)}'.") # apply and remove memorial messages. 
msg = item.get("apply_memorial_log") @@ -373,18 +373,18 @@ def extract_effect_type(state, item): writestr(state, msg, context="memorial_female") else: writestr(state, msg, context="memorial_male", - comment="Male memorial apply log for effect(s) '{}'.".format(', '.join(name))) + comment=f"Male memorial apply log for effect(s) '{', '.join(name)}'.") writestr(state, msg, context="memorial_female", - comment="Female memorial apply log for effect(s) '{}'.".format(', '.join(name))) + comment=f"Female memorial apply log for effect(s) '{', '.join(name)}'.") msg = item.get("remove_memorial_log") if not name: writestr(state, msg, context="memorial_male") writestr(state, msg, context="memorial_female") else: writestr(state, msg, context="memorial_male", - comment="Male memorial remove log for effect(s) '{}'.".format(', '.join(name))) + comment=f"Male memorial remove log for effect(s) '{', '.join(name)}'.") writestr(state, msg, context="memorial_female", - comment="Female memorial remove log for effect(s) '{}'.".format(', '.join(name))) + comment=f"Female memorial remove log for effect(s) '{', '.join(name)}'.") def extract_gun(state, item): @@ -458,9 +458,9 @@ def extract_profession(state, item): comment_f = entry_f["str"] if "description" in item: writestr(state, add_context(item["description"], "prof_desc_male"), - comment="Profession (male {}) description".format(comment_m)) + comment=f"Profession (male {comment_m}) description") writestr(state, add_context(item["description"], "prof_desc_female"), - comment="Profession (female {}) description".format(comment_f)) + comment=f"Profession (female {comment_f}) description") def extract_scenario(state, item): @@ -469,28 +469,28 @@ def extract_scenario(state, item): writestr(state, name, context="scenario_male", - comment="Name for scenario '{}' for a male character".format(name)) + comment=f"Name for scenario '{name}' for a male character") writestr(state, name, context="scenario_female", - comment="Name for scenario '{}' for a 
female character".format(name)) + comment=f"Name for scenario '{name}' for a female character") if name: msg = item.get("description") if msg: writestr(state, msg, context="scen_desc_male", - comment="Description for scenario '{}' for a male character.".format(name)) + comment=f"Description for scenario '{name}' for a male character.") writestr(state, msg, context="scen_desc_female", - comment="Description for scenario '{}' for a female character.".format(name)) + comment=f"Description for scenario '{name}' for a female character.") msg = item.get("start_name") if msg: writestr(state, msg, context="start_name", - comment="Starting location for scenario '{}'.".format(name)) + comment=f"Starting location for scenario '{name}'.") else: for f in ["description", "start_name"]: found = item.get(f, None) @@ -599,7 +599,7 @@ def extract_talk_effects(state, effects): for eff in effects: if type(eff) == dict: if "u_buy_monster" in eff and "name" in eff: - writestr(state, eff["name"], comment="Nickname for creature '{}'".format(eff["u_buy_monster"])) + writestr(state, eff["name"], comment=f"Nickname for creature '{eff['u_buy_monster']}'") def extract_talk_response(state, response): @@ -645,7 +645,7 @@ def extract_technique(state, item): def extract_trap(state, item): writestr(state, item["name"]) if "vehicle_data" in item and "sound" in item["vehicle_data"]: - writestr(state, item["vehicle_data"]["sound"], comment="Trap-vehicle collision message for trap '{}'".format(item["name"])) + writestr(state, item["vehicle_data"]["sound"], comment=f"Trap-vehicle collision message for trap '{item['name']}'") def extract_missiondef(state, item): @@ -654,7 +654,7 @@ def extract_missiondef(state, item): raise WrongJSONItem("JSON item don't contain 'name' field", item) writestr(state, item_name) if "description" in item: - writestr(state, item["description"], comment="Description for mission '{}'".format(item_name)) + writestr(state, item["description"], comment=f"Description for mission 
'{item_name}'") if "dialogue" in item: dialogue = item.get("dialogue") if "describe" in dialogue: @@ -699,7 +699,7 @@ def extract_mutation(state, item): found = item.get(f) # Need that check due format string argument if found is not None: - writestr(state, found, comment="Description for {}".format(item_name_or_id)) + writestr(state, found, comment=f"Description for {item_name_or_id}") if "attacks" in item: attacks = item.get("attacks") @@ -736,13 +736,13 @@ def extract_mutation_category(state, item): found = item.get(f) # Need that check due format string argument if found is not None: - writestr(state, found, comment="Mutation class: {} {}".format(item_name, f)) + writestr(state, found, comment=f"Mutation class: {item_name} {f}") found = item.get("memorial_message") writestr(state, found, context="memorial_male", - comment="Mutation class: {} Male memorial messsage".format(item_name)) + comment=f"Mutation class: {item_name} Male memorial messsage") writestr(state, found, context="memorial_female", - comment="Mutation class: {} Female memorial messsage".format(item_name)) + comment=f"Mutation class: {item_name} Female memorial messsage") def extract_vehspawn(state, item): @@ -771,7 +771,7 @@ def extract_recipe_category(state, item): continue subcat_name = subcat.split('_')[2] writestr(state, subcat_name, - comment="Crafting recipes subcategory of '{}' category".format(cat_name)) + comment=f"Crafting recipes subcategory of '{cat_name}' category") def extract_gate(state, item): @@ -779,7 +779,7 @@ def extract_gate(state, item): for (k, v) in sorted(messages.items(), key=lambda x: x[0]): writestr(state, v, - comment="'{}' action message of some gate object.".format(k)) + comment=f"'{k}' action message of some gate object.") def extract_field_type(state, item): @@ -799,19 +799,19 @@ def extract_ter_furn_transform(state, item): def extract_skill_display_type(state, item): - writestr(state, item["display_string"], comment="Display string for skill display type 
'{}'".format(item["id"])) + writestr(state, item["display_string"], comment=f"Display string for skill display type '{item['id']}'") def extract_fault(state, item): writestr(state, item["name"]) - writestr(state, item["description"], comment="Description for fault '{}'".format(item["name"])) + writestr(state, item["description"], comment=f"Description for fault '{item['name']}'") for method in item["mending_methods"]: if "name" in method: - writestr(state, method["name"], comment="Name of mending method for fault '{}'".format(item["name"])) + writestr(state, method["name"], comment=f"Name of mending method for fault '{item['name']}'") if "description" in method: - writestr(state, method["description"], comment="Description for mending method '{}' of fault '{}'".format(method["name"], item["name"])) + writestr(state, method["description"], comment=f"Description for mending method '{method['name']}' of fault '{item['name']}'") if "success_msg" in method: - writestr(state, method["success_msg"], format_strings=True, comment="Success message for mending method '{}' of fault '{}'".format(method["name"], item["name"])) + writestr(state, method["success_msg"], format_strings=True, comment=f"Success message for mending method '{method['name']}' of fault '{item['name']}'") def extract_snippet(state, item): @@ -875,7 +875,7 @@ def add_context(entry, ctxt): entry = deepcopy(entry) if type(entry) == dict: if "ctxt" in entry: - entry["ctxt"] += "|" + ctxt + entry["ctxt"] += f"|{ctxt}" else: entry["ctxt"] = ctxt return entry @@ -893,7 +893,7 @@ def writestr_basic(state, msgid, msgid_plural, msgctxt, comment, check_c_format) if comment: # Append `~ ` to help translators distinguish between comments - comment = '~ ' + comment + comment = f"~ {comment}" entry = polib.POEntry( msgid=msgid, @@ -917,7 +917,7 @@ def writestr(state, string, context=None, format_strings=False, comment=None, pl if comment is None: comment = string["//~"] else: - comment = "{}\n{}".format(comment, 
string["//~"]) + comment = f"{comment}\n{string['//~']}" context = string.get( "ctxt" ) str_pl = None if pl_fmt: @@ -927,7 +927,7 @@ def writestr(state, string, context=None, format_strings=False, comment=None, pl str_pl = string["str_sp"] else: # no "str_pl" entry in json, assuming regular plural form as in translations.cpp - str_pl = "{}s".format(string["str"]) + str_pl = f"{string['str']}s" elif "str_pl" in string or "str_sp" in string: raise WrongJSONItem("ERROR: 'str_pl' and 'str_sp' not supported here", string) if "str" in string: @@ -943,7 +943,7 @@ def writestr(state, string, context=None, format_strings=False, comment=None, pl str_singular = string if pl_fmt: # no "str_pl" entry in json, assuming regular plural form as in translations.cpp - str_pl = "{}s".format(string) + str_pl = f"{string}s" else: str_pl = None elif string is None: @@ -996,7 +996,7 @@ def extract_use_action_msgs(state, use_action, it_name): if type(use_action) is dict and f in use_action: if it_name: writestr(state, use_action[f], - comment="Use action {} for {}.".format(f, it_name)) + comment=f"Use action {f} for {it_name}.") # Recursively check sub objects as they may contain more messages. 
if type(use_action) is list: for i in use_action: @@ -1022,9 +1022,9 @@ def extract_json(state, item): extract_specials[object_type](state, item) return elif object_type not in automatically_convertible: - raise WrongJSONItem("ERROR: Unrecognized object type '{}'!".format(object_type), item) + raise WrongJSONItem(f"ERROR: Unrecognized object type '{object_type}'!", item) if object_type not in known_types: - print("WARNING: known_types does not contain object type '{}'".format(object_type)) + print(f"WARNING: known_types does not contain object type '{object_type}'") # Use mod id as project name if project name is not specified if object_type == "MOD_INFO" and not state.project_name: state.project_name = item.get("id") @@ -1053,12 +1053,12 @@ def extract_json(state, item): wrote = True if "conditional_names" in item: for cname in item["conditional_names"]: - c = "Conditional name for {} when {} matches {}".format(name, cname["type"], cname["condition"]) + c = f"Conditional name for {name} when {cname['type']} matches {cname['condition']}" writestr(state, cname["name"], comment=c, format_strings=True, pl_fmt=True) wrote = True if "description" in item: if name: - c = "Description for {}".format(name) + c = f"Description for {name}" else: c = None writestr(state, item["description"], comment=c) @@ -1070,7 +1070,7 @@ def extract_json(state, item): writestr(state, item["sound"]) wrote = True if "sound_description" in item: - writestr(state, item["sound_description"], comment="Description for the sound of spell '{}'".format(name)) + writestr(state, item["sound_description"], comment=f"Description for the sound of spell '{name}'") wrote = True if "snippet_category" in item and type(item["snippet_category"]) is list: # snippet_category is either a simple string (the category ident) @@ -1124,7 +1124,7 @@ def extract_json(state, item): for rech in relic_data["recharge_scheme"]: if "message" in rech: writestr(state, rech["message"], - comment="Relic recharge message for {} 
'{}'".format(object_type, name) + comment=f"Relic recharge message for {object_type} '{name}'" ) wrote = True if "text" in item: @@ -1135,7 +1135,7 @@ def extract_json(state, item): wrote = True if "message" in item: writestr(state, item["message"], format_strings=True, - comment="Message for {} '{}'".format(object_type, name) ) + comment=f"Message for {object_type} '{name}'" ) wrote = True if "messages" in item: for message in item["messages"]: @@ -1168,19 +1168,19 @@ def extract_json(state, item): wrote = True if not wrote and not "copy-from" in item: if not warning_supressed(state.current_source_file): - print("WARNING: {}: nothing translatable found in item: {}".format(state.current_source_file, item)) + print(f"WARNING: {state.current_source_file}: nothing translatable found in item: {item}") def assert_num_args(node, args, n): if len(args) != n: - raise Exception("invalid amount of arguments in translation call (found {}, expected {}). Error source: {}".format(len(args), n, ast.to_lua_source(node))) + raise Exception(f"invalid amount of arguments in translation call (found {len(args)}, expected {n}). Error source: {ast.to_lua_source(node)}") def get_string_literal(node, args, pos): if isinstance(args[pos], astnodes.String): return args[pos].s else: - raise Exception("argument to translation call should be string. Error source: {}".format(ast.to_lua_source(node))) + raise Exception(f"argument to translation call should be string. 
Error source: {ast.to_lua_source(node)}") class LuaComment: @@ -1233,7 +1233,7 @@ def load_comments(self, comments): def report_unused_comments(self): for comment in self.trans_comments: if not comment.used: - print("WARNING: unused translator comment at {}:{}".format(self.state.current_source_file, comment.line )) + print(f"WARNING: unused translator comment at {self.state.current_source_file}:{comment.line}") def __find_trans_comments_before(self, line): for comment in self.trans_comments: @@ -1257,7 +1257,7 @@ def __find_comment(self, line): else: comment = self.__find_regular_comment_before(line) if comment: - print("WARNING: regular comment used when translation comment may be intended. Error source: {}:{}".format(self.state.current_source_file, comment.line )) + print(f"WARNING: regular comment used when translation comment may be intended. Error source: {self.state.current_source_file}:{comment.line}") return None def visit_Call(self, node): @@ -1301,7 +1301,7 @@ def visit_Call(self, node): msgid_plural = get_string_literal(node, func_args, 2) write = True except Exception as E: - print("WARNING: {}".format(E)) + print(f"WARNING: {E}") if write: comment = self.__find_comment(func_line) writestr_basic(self.state, msgid, msgid_plural, msgctxt, comment, check_c_format = True) @@ -1323,7 +1323,7 @@ def extract_lua(state, source): for comment in comments: if comment.is_trans_comment: - print("Line {}: {}".format( comment.line, comment.text)) + print(f"Line {comment.line}: {comment.text}") visitor = LuaCallVisitor() visitor.register_state(state) @@ -1348,25 +1348,25 @@ def extract_all_from_dir(state, dir): full_name = os.path.join(dir, f) if os.path.isdir(full_name): if os.path.normpath(full_name) in ignore_dirs: - log_verbose("Skipping dir (ignored): {}".format(f)) + log_verbose(f"Skipping dir (ignored): {f}") else: dirs.append(f) elif f in skiplist: - log_verbose("Skipping file (skiplist): '{}'".format(f)) + log_verbose(f"Skipping file (skiplist): '{f}'") elif 
full_name in ignore_files: - log_verbose("Skipping file (ignored): '{}'".format(f)) + log_verbose(f"Skipping file (ignored): '{f}'") elif f.endswith(".json"): if not options.tracked_only or full_name in git_files_list: extract_all_from_json_file(state, full_name) else: - log_verbose("Skipping file (untracked): '{}'".format(full_name)) + log_verbose(f"Skipping file (untracked): '{full_name}'") elif f.endswith(".lua"): if not options.tracked_only or full_name in git_files_list: extract_all_from_lua_file(state, full_name) else: - log_verbose("Skipping file (untracked): '{}'".format(full_name)) + log_verbose(f"Skipping file (untracked): '{full_name}'") else: - log_verbose("Skipping file (not applicable): '{}'".format(f)) + log_verbose(f"Skipping file (not applicable): '{f}'") for d in dirs: extract_all_from_dir(state, os.path.join(dir, d)) @@ -1374,7 +1374,7 @@ def extract_all_from_dir(state, dir): def extract_all_from_json_file(state, json_file): "Extract translatable strings from every object in the specified JSON file." state.current_source_file = json_file - log_verbose("Loading {}".format(json_file)) + log_verbose(f"Loading {json_file}") with open(json_file, encoding="utf-8") as fp: jsondata = json.load(fp) @@ -1386,7 +1386,7 @@ def extract_all_from_json_file(state, json_file): for jsonobject in jsondata: extract_json(state, jsonobject) except WrongJSONItem as E: - print("---\nFile: '{0}'".format(json_file)) + print(f"---\nFile: '{json_file}'") print(E) exit(1) @@ -1394,7 +1394,7 @@ def extract_all_from_json_file(state, json_file): def extract_all_from_lua_file(state, lua_file): "Extract translatable strings from lua code in the specified file." 
state.current_source_file = lua_file - log_verbose("Loading {}".format(lua_file)) + log_verbose(f"Loading {lua_file}") with open(lua_file, encoding="utf-8") as fp: luadata_raw = fp.read() @@ -1402,7 +1402,7 @@ def extract_all_from_lua_file(state, lua_file): try: extract_lua(state, luadata_raw) except Exception as E: - print("---\nFile: '{0}'".format(lua_file)) + print(f"---\nFile: '{lua_file}'") print(E) exit(1) @@ -1417,7 +1417,7 @@ def prepare_git_file_list(): output = res.stdout.readlines() res.communicate() if res.returncode != 0: - print("'git ls-files' command exited with non-zero exit code: {}".format(res.returncode)) + print(f"'git ls-files' command exited with non-zero exit code: {res.returncode}") exit(1) for f in output: if len(f) > 0: @@ -1467,15 +1467,15 @@ def write_pot(entries, output_path, project_name): print("==> Parsing JSON") for i in sorted(directories): - print("----> Traversing directory {}".format(i)) + print(f"----> Traversing directory {i}") extract_all_from_dir(state, i) if options.warn_unused_types: print("==> Checking types") if len(known_types - found_types) != 0: - print("WARNING: type {} not found in any JSON objects".format(known_types - found_types)) + print(f"WARNING: type {known_types - found_types} not found in any JSON objects") if len(needs_plural - found_types) != 0: - print("WARNING: type {} from needs_plural not found in any JSON objects".format(needs_plural - found_types)) + print(f"WARNING: type {needs_plural - found_types} from needs_plural not found in any JSON objects") print("==> Writing POT") if not state.project_name: diff --git a/lang/unicode_check.py b/lang/unicode_check.py index b1759d1a018a..36b616de8c13 100755 --- a/lang/unicode_check.py +++ b/lang/unicode_check.py @@ -7,7 +7,7 @@ def print_encode_error(unicode_err, counter): err_line = counter + chunk.count(b'\n', 0, unicode_err.start) line_start = chunk.rfind(b'\n', 0, unicode_err.start) + 1 line_end = chunk.find(b'\n', line_start) - print("Unicode error on 
line {0}:".format(err_line)) + print(f"Unicode error on line {err_line}:") # Use RAW write because this is bytes class sys.stdout.buffer.write(chunk[line_start:line_end + 1]) x_num = unicode_err.end - unicode_err.start + 2 @@ -30,7 +30,7 @@ def check(f): if __name__ == '__main__': if len(sys.argv) < 2: - print("Usage: {} [FILENAME]".format(sys.argv[0])) + print(f"Usage: {sys.argv[0]} [FILENAME]") sys.exit(1) with open(sys.argv[1], encoding="utf-8") as pot_file: if not check(pot_file): diff --git a/tools/check_po_printf_format.py b/tools/check_po_printf_format.py index f195795f796d..09b8c60d6fba 100755 --- a/tools/check_po_printf_format.py +++ b/tools/check_po_printf_format.py @@ -125,7 +125,7 @@ def print_message(msg, segments): for file in sorted(os.listdir("lang/po")): if file.endswith(".po") and not file.endswith("en.po"): print("Checking", file, end="", flush=True) - errors = check_po_file("lang/po/" + file) + errors = check_po_file(f"lang/po/{file}") n = len(errors) num_errors += n if n > 0: diff --git a/tools/copy_from.py b/tools/copy_from.py index b8dc3157a07c..701406b77ad2 100755 --- a/tools/copy_from.py +++ b/tools/copy_from.py @@ -16,11 +16,11 @@ def get_data(argsDict, resource_name): with open(resource_filename) as resource_file: resource += json.load(resource_file) except FileNotFoundError: - exit("Failed: could not find {}".format(resource_filename)) + exit(f"Failed: could not find {resource_filename}") else: - print("Invalid filename {}".format(resource_filename)) + print(f"Invalid filename {resource_filename}") if not resource: - exit("Failed: {} was empty".format(resource_filename)) + exit(f"Failed: {resource_filename} was empty") return resource diff --git a/tools/copy_mac_libs.py b/tools/copy_mac_libs.py index 11bbea6f291a..e10871f3f4fc 100644 --- a/tools/copy_mac_libs.py +++ b/tools/copy_mac_libs.py @@ -22,25 +22,25 @@ def rewrite_identity(object): shutil.chown(object, os.getuid()) st = os.stat(object) os.chmod(object, st.st_mode | stat.S_IWUSR) 
- id = "@executable_path/{}".format(os.path.basename(object)) + id = f"@executable_path/{os.path.basename(object)}" ret = subprocess.run(["install_name_tool", "-id", id, object]) if ret.returncode != 0: print("Error:", ret.stderr.decode('utf-8')) os.chmod(object, (st.st_mode | stat.S_IWUSR) ^ stat.S_IWUSR) - print("Rewritten identity of {}".format(object)) + print(f"Rewritten identity of {object}") def rewrite_dependency(object, dependency): shutil.chown(object, os.getuid()) st = os.stat(object) os.chmod(object, st.st_mode | stat.S_IWUSR) - dest = "@executable_path/{}".format(os.path.basename(dependency)) + dest = f"@executable_path/{os.path.basename(dependency)}" ret = subprocess.run(["install_name_tool", "-change", dependency, dest, object]) if ret.returncode != 0: print("Error:", ret.stderr.decode('utf-8')) os.chmod(object, (st.st_mode | stat.S_IWUSR) ^ stat.S_IWUSR) - print("Rewritten reference from {} to {}".format(dependency, dest)) + print(f"Rewritten reference from {dependency} to {dest}") def copy_and_rewrite(file): @@ -64,7 +64,7 @@ def copy_and_rewrite(file): copied_file = file if file != executable: copied_file = shutil.copy2(file, executable_dir) - print("Copied {} to {}".format(file, copied_file)) + print(f"Copied {file} to {copied_file}") for dependency in dependencies: if dependency == file: rewrite_identity(copied_file) diff --git a/tools/dialogue_validator.py b/tools/dialogue_validator.py index 263e26db9f9f..308598fc2a29 100755 --- a/tools/dialogue_validator.py +++ b/tools/dialogue_validator.py @@ -30,7 +30,7 @@ def get_dialogue_from_json(): arg_path = arg_path[:-1] for subdir_path, dirnames, filenames in os.walk(arg_path): for filename in filenames: - path = subdir_path + "/" + filename + path = f"{subdir_path}/{filename}" if path == "data/json/npcs/TALK_TEST.json": continue if path.endswith(".json"): @@ -151,15 +151,15 @@ def validate(dialogue): if not topic_record.get("valid", False): all_topics_valid = False if topic_id in start_topics: -
print("talk topic {} referenced in an NPC chat but not defined".format(topic_id)) + print(f"talk topic {topic_id} referenced in an NPC chat but not defined") else: - print("talk topic {} referenced in a response but not defined".format(topic_id)) + print(f"talk topic {topic_id} referenced in a response but not defined") if not topic_record.get("in_response", False): all_topics_valid = False - print("talk topic {} defined but not referenced in a response".format(topic_id)) + print(f"talk topic {topic_id} defined but not referenced in a response") if topic_id in OBSOLETE_TOPICS: all_topics_valid = False - print("talk topic {} referenced despite being listed as obsolete.".format(topic_id)) + print(f"talk topic {topic_id} referenced despite being listed as obsolete.") no_change = False passes = 0 @@ -208,8 +208,8 @@ def validate(dialogue): continue branch_record = topic_branches[topic_id] if not branch_record["ends"]: - print("{} does not reach TALK_DONE".format(topic_id)) + print(f"{topic_id} does not reach TALK_DONE") if not branch_record["parent"] in start_topics: - print("no path from a start topic to {}".format(topic_id)) + print(f"no path from a start topic to {topic_id}") validate(get_dialogue_from_json()) diff --git a/tools/fix-compilation-database.py b/tools/fix-compilation-database.py index 8c8ee15a3f31..a9c5178c4e21 100755 --- a/tools/fix-compilation-database.py +++ b/tools/fix-compilation-database.py @@ -29,7 +29,7 @@ else: match_result = starts_with_drive_letter.match(command[i]) if match_result: - command[i] = "{}:/{}".format(match_result.group(1), match_result.group(2)) + command[i] = f"{match_result.group(1)}:/{match_result.group(2)}" i = i + 1 data[j]["command"] = " ".join([shlex.quote(s) for s in command]) diff --git a/tools/gfx_tools/decompose.py b/tools/gfx_tools/decompose.py index 770ca0678b53..9824897e5d24 100755 --- a/tools/gfx_tools/decompose.py +++ b/tools/gfx_tools/decompose.py @@ -178,16 +178,16 @@ def parse_tile_entry(self, tile_entry, 
refs): pngnum = all_pngnums[i] if pngnum in refs.pngnum_to_pngname: continue - pngname = "{}_{}_{}".format(pngnum, tile_id, i + offset) + pngname = f"{pngnum}_{tile_id}_{i + offset}" while pngname in refs.pngname_to_pngnum: offset += 1 - pngname = "{}_{}_{}".format(pngnum, tile_id, i + offset) + pngname = f"{pngnum}_{tile_id}_{i + offset}" try: refs.pngnum_to_pngname.setdefault(pngnum, pngname) refs.pngname_to_pngnum.setdefault(pngname, pngnum) refs.add_pngnum_to_tsfilepath(pngnum) except TypeError: - print("failed to parse {}".format(json.dumps(tile_entry, indent=2))) + print(f"failed to parse {json.dumps(tile_entry, indent=2)}") raise return tile_id @@ -204,7 +204,7 @@ def summarize(self, tile_info, refs): if self.pngnum_max > 0: refs.ts_data[self.ts_filename] = self ts_tile_info = { - "//": "indices {} to {}".format(self.pngnum_min, self.pngnum_max) + "//": f"indices {self.pngnum_min} to {self.pngnum_max}" } if self.write_dim: ts_tile_info["sprite_offset_x"] = self.sprite_offset_x @@ -224,8 +224,8 @@ def __init__(self, ts_filename, refs): self.valid = True ts_base = ts_filename.split(".png")[0] - geometry_dim = "{}x{}".format(self.ts_data.sprite_width, self.ts_data.sprite_height) - pngs_dir = "/pngs_" + ts_base + "_{}".format(geometry_dim) + geometry_dim = f"{self.ts_data.sprite_width}x{self.ts_data.sprite_height}" + pngs_dir = f"/pngs_{ts_base}_{geometry_dim}" self.ts_dir_pathname = refs.tileset_pathname + pngs_dir find_or_make_dir(self.ts_dir_pathname) self.tilenum_in_dir = 256 @@ -237,12 +237,12 @@ def write_expansions(self): expansion_id = expand_entry.get("id", "expansion") if not isinstance(expansion_id, str): continue - expand_entry_pathname = self.ts_dir_pathname + "/" + expansion_id + ".json" + expand_entry_pathname = f"{self.ts_dir_pathname}/{expansion_id}.json" write_to_json(expand_entry_pathname, expand_entry) def increment_dir(self): if self.tilenum_in_dir > 255: - self.subdir_pathname = self.ts_dir_pathname + "/" + "images{}".format(self.dir_count) 
+ self.subdir_pathname = f"{self.ts_dir_pathname}/images{self.dir_count}" find_or_make_dir(self.subdir_pathname) self.tilenum_in_dir = 0 self.dir_count += 1 @@ -268,7 +268,7 @@ def extract_image(self, png_index, refs): tile_image = tile_data.ts_image.extract_area(tile_off_x, tile_off_y, tile_data.sprite_width, tile_data.sprite_height) - tile_png_pathname = self.subdir_pathname + "/" + pngname + ".png" + tile_png_pathname = f"{self.subdir_pathname}/{pngname}.png" tile_image.pngsave(tile_png_pathname) refs.extracted_pngnums[png_index] = True @@ -303,7 +303,7 @@ def get_all_data(self, tileset_dirname, delete_pathname): try: os.stat(self.tileset_pathname) except KeyError: - print("cannot find a directory {}".format(self.tileset_pathname)) + print(f"cannot find a directory {self.tileset_pathname}") exit -1 tileset_confname = refs.tileset_pathname + "/" + "tile_config.json" @@ -311,7 +311,7 @@ def get_all_data(self, tileset_dirname, delete_pathname): try: os.stat(tileset_confname) except KeyError: - print("cannot find a directory {}".format(tileset_confname)) + print(f"cannot find a directory {tileset_confname}") exit -1 if delete_pathname: @@ -389,7 +389,7 @@ def convert_pngnum_to_pngname(self, tile_entry): def report_missing(self): for pngnum in self.pngnum_to_pngname: if not self.extracted_pngnums.get(pngnum): - print("missing index {}, {}".format(pngnum, self.pngnum_to_pngname[pngnum])) + print(f"missing index {pngnum}, {self.pngnum_to_pngname[pngnum]}") args = argparse.ArgumentParser(description="Split a tileset's tile_config.json into a directory per tile containing the tile data and png.") @@ -452,7 +452,7 @@ def report_missing(self): tile_entry_name, tile_entry = refs.convert_pngnum_to_pngname(tile_entry) if not tile_entry_name: continue - tile_entry_pathname = subdir_pathname + "/" + tile_entry_name + "_" + str(idx) + ".json" + tile_entry_pathname = f"{subdir_pathname}/{tile_entry_name}_{str(idx)}.json" #if os.path.isfile(tile_entry_pathname): # 
print("overwriting {}".format(tile_entry_pathname)) write_to_json(tile_entry_pathname, tile_entry) diff --git a/tools/gfx_tools/png_update.py b/tools/gfx_tools/png_update.py index 0d01317271f1..44a69bd14094 100755 --- a/tools/gfx_tools/png_update.py +++ b/tools/gfx_tools/png_update.py @@ -124,31 +124,31 @@ def convert_tile_entry_file(file_path, old_name, new_name): if tmp_new_name.endswith(".png"): new_name = tmp_new_name[:-4] -old_name_json = old_name + ".json" -old_name_png = old_name + ".png" -new_name_json = new_name + ".json" -new_name_png = new_name + ".png" +old_name_json = f"{old_name}.json" +old_name_png = f"{old_name}.png" +new_name_json = f"{new_name}.json" +new_name_png = f"{new_name}.png" if not tileset_dirname.startswith("gfx/"): - tileset_dirname = "gfx/" + tileset_dirname + tileset_dirname = f"gfx/{tileset_dirname}" if tileset_dirname.endswith("/"): tileset_dirname = tileset_dirname[:-1] -print("In " + tileset_dirname + ", renaming " + old_name + " to " + new_name) +print(f"In {tileset_dirname}, renaming {old_name} to {new_name}") for png_dirname in os.listdir(tileset_dirname): if not png_dirname.startswith("pngs_"): continue - png_path = tileset_dirname + "/" + png_dirname + png_path = f"{tileset_dirname}/{png_dirname}" for subdir_fpath, dirnames, filenames in os.walk(png_path): for filename in filenames: - old_path = subdir_fpath + "/" + filename + old_path = f"{subdir_fpath}/{filename}" if filename.endswith(".json"): convert_tile_entry_file(old_path, old_name, new_name) if filename == old_name_png: - new_path = subdir_fpath + "/" + new_name_png + new_path = f"{subdir_fpath}/{new_name_png}" os.rename(old_path, new_path) elif filename == old_name_json: - new_path = subdir_fpath + "/" + new_name_json + new_path = f"{subdir_fpath}/{new_name_json}" os.rename(old_path, new_path) diff --git a/tools/json_tools/cddatags.py b/tools/json_tools/cddatags.py index 6cb61d9cf45a..32e86cf6a6da 100755 --- a/tools/json_tools/cddatags.py +++ 
b/tools/json_tools/cddatags.py @@ -11,8 +11,8 @@ TAGS_FILE = os.path.join(TOP_DIR, "tags") def make_tags_line(id_key, id, filename): - pattern = '/"{id_key}": "{id}"/'.format(id_key=id_key, id=id) - return '\t'.join((id, filename, pattern)).encode('utf-8') + pattern = f'/"{id_key}": "{id}"/' + return f"{id}\t{filename}\t{pattern}".encode('utf-8') def is_json_tag_line(line): return b'.json\t' in line @@ -41,8 +41,7 @@ def main(args): json_data = json.load(file) except Exception as err: sys.stderr.write( - "Problem reading file %s, reason: %s" % - (filename, err)) + f"Problem reading file {filename}, reason: {err}") continue if type(json_data) == dict: json_data = [json_data] diff --git a/tools/json_tools/keys.py b/tools/json_tools/keys.py index eb2e6660f949..1e4982d0290b 100755 --- a/tools/json_tools/keys.py +++ b/tools/json_tools/keys.py @@ -55,8 +55,8 @@ if args.human: title = "Count of keys" - print("\n\n%s" % title) - print("(Data from %s out of %s blobs)" % (num_matches, len(json_data))) + print(f"\n\n{title}") + print(f"(Data from {num_matches} out of {len(json_data)} blobs)") print("-" * len(title)) ui_counts_to_columns(stats) else: diff --git a/tools/json_tools/util.py b/tools/json_tools/util.py index 18116f0f4c21..1e6935f5d6fc 100755 --- a/tools/json_tools/util.py +++ b/tools/json_tools/util.py @@ -36,9 +36,9 @@ def import_data(json_dir=JSON_DIR, json_fmatch=JSON_FNMATCH): try: candidates = json.load(file, object_pairs_hook=OrderedDict) except Exception as err: - errors.append("Problem reading file %s, reason: %s" % (json_file, err)) + errors.append(f"Problem reading file {json_file}, reason: {err}") if type(candidates) != list: - errors.append("Problem parsing data from file %s, reason: expected a list." 
% json_file) + errors.append(f"Problem parsing data from file {json_file}, reason: expected a list.") else: data += candidates return (data, errors) @@ -250,7 +250,7 @@ def indented_write(self, s): self.buf.write(self.indent*self.indent_multiplier + s) def write_key(self, k): - self.indented_write("\"%s\": " % k) + self.indented_write(f"\"{k}\": ") def write_primitive_key_val(self, k, v): self.write_key(k) diff --git a/tools/json_tools/values.py b/tools/json_tools/values.py index ee43993933b9..803a2a107818 100755 --- a/tools/json_tools/values.py +++ b/tools/json_tools/values.py @@ -58,9 +58,9 @@ sys.exit(1) if args.human: - title = "Count of values from field '%s'" % search_key - print("\n\n%s" % title) - print("(Data from %s out of %s blobs)" % (num_matches, len(json_data))) + title = f"Count of values from field '{search_key}'" + print(f"\n\n{title}") + print(f"(Data from {num_matches} out of {len(json_data)} blobs)") print("-" * len(title)) ui_counts_to_columns(stats) else: diff --git a/tools/merge_maps.py b/tools/merge_maps.py index 5880db198eef..ca5107f02a8e 100755 --- a/tools/merge_maps.py +++ b/tools/merge_maps.py @@ -25,20 +25,19 @@ def x_y_bucket(x, y): - return "{}__{}".format(math.floor((x - MIN_X) / STRIDE_X), math.floor((y - MIN_Y) / STRIDE_Y)) + return f"{math.floor((x - MIN_X) / STRIDE_X)}__{math.floor((y - MIN_Y) / STRIDE_Y)}" def x_y_sub(x, y, is_north): if is_north: - return "{}__{}".format((x - MIN_X) % STRIDE_X, (y - MIN_Y) % STRIDE_Y) + return f"{(x - MIN_X) % STRIDE_X}__{(y - MIN_Y) % STRIDE_Y}" else: - return "{}__{}".format((x - MIN_X - 1) % STRIDE_X, - (y - MIN_Y - 1) % STRIDE_Y) + return f"{(x - MIN_X - 1) % STRIDE_X}__{(y - MIN_Y - 1) % STRIDE_Y}" def x_y_simple(x, y): - return "{}__{}".format(x, y) + return f"{x}__{y}" def get_data(argsDict, resource_name): @@ -52,9 +51,9 @@ def get_data(argsDict, resource_name): with open(resource_filename) as resource_file: resource += json.load(resource_file) except FileNotFoundError: - exit("Failed: 
could not find {}".format(resource_filename)) + exit(f"Failed: could not find {resource_filename}") else: - print("Invalid filename {}".format(resource_filename)) + print(f"Invalid filename {resource_filename}") return resource @@ -151,9 +150,9 @@ def adjust_coord(x_or_y, new_entry, old_entry, offset): args.add_argument("specials_sources", action="store", nargs="+", help="specify json file with overmap special data.") args.add_argument("--x", dest="stride_x", action="store", - help="number of horizontal maps in each block. Defaults to {}.".format(STRIDE_X)) + help=f"number of horizontal maps in each block. Defaults to {STRIDE_X}.") args.add_argument("--y", dest="stride_y", action="store", - help="number of vertictal maps in each block. Defaults to {}.".format(STRIDE_Y)) + help=f"number of vertictal maps in each block. Defaults to {STRIDE_Y}.") args.add_argument("--output", dest="output_name", action="store", help="Name of output file. Defaults to the command line.") argsDict = vars(args.parse_args()) @@ -220,7 +219,7 @@ def adjust_coord(x_or_y, new_entry, old_entry, offset): # dynamically expand the list of "place_" terms for term in KEYED_TERMS: - PLACE_TERMS.append("place_" + term) + PLACE_TERMS.append(f"place_{term}") basic_entry = { "method": "json", @@ -318,7 +317,7 @@ def adjust_coord(x_or_y, new_entry, old_entry, offset): if not col_offset: new_rows.append(old_rows[i]) else: - new_rows[i + row_offset] = "".join([new_rows[i + row_offset], old_rows[i]]) + new_rows[i + row_offset] = f"{new_rows[i + row_offset]}{old_rows[i]}" if len(maps) == 1: entry["om_terrain"] = om_id else: @@ -344,4 +343,4 @@ def adjust_coord(x_or_y, new_entry, old_entry, offset): output_file.write(json.dumps(new_mapgen)) exit() -print("{}".format(json.dumps(new_mapgen, indent=2))) +print(f"{json.dumps(new_mapgen, indent=2)}") diff --git a/tools/palettize.py b/tools/palettize.py index 18a34dd13291..bb1a79c136ef 100755 --- a/tools/palettize.py +++ b/tools/palettize.py @@ -6,7 +6,7 @@ def 
hash_key(key): if isinstance(key, list): - return "list_" + "".join(key) + return f"list_{''.join(key)}" else: return key @@ -64,15 +64,15 @@ def resolve_conflicts(om_objs, palette, conflicts): mapgen = [] mapgen_source = argsDict.get("mapgen_source", "") palette_name = argsDict.get("palette_name", "") -palette_source = argsDict.get("palette_path", "") + "/" + palette_name + ".json" +palette_source = f"{argsDict.get('palette_path', '')}/{palette_name}.json" if mapgen_source.endswith(".json"): try: with open(mapgen_source) as mapgen_file: mapgen += json.load(mapgen_file) except FileNotFoundError: - exit("Failed: could not find {}".format(mapgen_source)) + exit(f"Failed: could not find {mapgen_source}") else: - exit("Failed: invalid mapgen file name {}".format(mapgen_source)) + exit(f"Failed: invalid mapgen file name {mapgen_source}") furn_pal = {} furn_conflicts = {} diff --git a/tools/update_blueprint_needs.py b/tools/update_blueprint_needs.py index 83656e297fbb..8941fb3d5863 100755 --- a/tools/update_blueprint_needs.py +++ b/tools/update_blueprint_needs.py @@ -88,14 +88,14 @@ def main(argv): reqs += line if complete: update_blueprints[ident] = json.loads(reqs); - print("{} needs updating".format(ident)) + print(f"{ident} needs updating") if len(update_blueprints) == 0: print("no inconsistency reported in the test log") return for json_dir in json_dirs: - print("walking dir {}".format(json_dir)) + print(f"walking dir {json_dir}") for root, dirs, files in os.walk(json_dir): for file in files: json_path = os.path.join(root, file) @@ -118,7 +118,7 @@ def main(argv): else: ident = obj["result"] if "id_suffix" in obj: - ident += "_" + obj["id_suffix"] + ident += f"_{obj['id_suffix']}" if ident in update_blueprints: if suppress: obj["check_blueprint_needs"] = False @@ -126,7 +126,7 @@ def main(argv): obj["blueprint_needs"] = update_blueprints[ident] if not changed: changed = True - print("updating {}".format(json_path)) + print(f"updating {json_path}") if changed: 
dump_json_and_lint(content, json_path) diff --git a/utilities/building-utility/deconstruct.py b/utilities/building-utility/deconstruct.py index 874f1dabdd6a..3a5edd1333e7 100755 --- a/utilities/building-utility/deconstruct.py +++ b/utilities/building-utility/deconstruct.py @@ -187,7 +187,7 @@ def complete_json_file(template_file, all_cells, remove_template=True): json_output_list.append(copy_of_template) # TODO: better output file names - with open("output_" + os.path.basename(template_file.name), + with open(f"output_{os.path.basename(template_file.name)}", "w") as outfile: json.dump(json_output_list, outfile, indent=4, separators=(",", ": "), sort_keys=True)