Misc lint
rocky committed Mar 2, 2024
1 parent 51639b8 commit 975171e
Showing 4 changed files with 47 additions and 42 deletions.
41 changes: 21 additions & 20 deletions decompyle3/parsers/p38/lambda_custom.py
@@ -1,4 +1,4 @@
# Copyright (c) 2020-2023 Rocky Bernstein
# Copyright (c) 2020-2024 Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -49,22 +49,22 @@ def customize_grammar_rules_lambda38(self, tokens, customize):
)
)

# Opcode names in the custom_ops_processed set have rules that get added
# unconditionally and the rules are constant. So they need to be done
# only once and if we see the opcode a second we don't have to consider
# adding more rules.
# Opcode names in the custom_ops_processed set have rules that get
# added unconditionally and the rules are constant. So they need to be
# done only once, and if we see the opcode a second time, we don't have
# to consider adding more rules.
#
custom_ops_processed = frozenset()

# A set of instruction operation names that exist in the token stream.
# We use this customize the grammar that we create.
# We use this to customize the grammar that we create.
# 2.6-compatible set comprehensions
self.seen_ops = frozenset([t.kind for t in tokens])
self.seen_op_basenames = frozenset(
[opname[: opname.rfind("_")] for opname in self.seen_ops]
)

custom_ops_processed = set(["DICT_MERGE"])
custom_ops_processed = {"DICT_MERGE"}

# Loop over instructions adding custom grammar rules based on
# a specific instruction seen.
@@ -96,7 +96,7 @@ def customize_grammar_rules_lambda38(self, tokens, customize):
opname = token.kind

# Do a quick breakout before testing potentially
# each of the dozen or so instruction in if elif.
# each of the dozen or so instructions in "if"/"elif".
if (
opname[: opname.find("_")] not in customize_instruction_basenames
or opname in custom_ops_processed
@@ -106,7 +106,7 @@ def customize_grammar_rules_lambda38(self, tokens, customize):
opname_base = opname[: opname.rfind("_")]

# Do a quick breakout before testing potentially
# each of the dozen or so instruction in if elif.
# each of the dozen or so instructions in "if"/"elif".
if (
opname[: opname.find("_")] not in customize_instruction_basenames
or opname in custom_ops_processed
@@ -221,7 +221,7 @@ def customize_grammar_rules_lambda38(self, tokens, customize):

if not is_LOAD_CLOSURE or v == 0:
# We do this complicated test to speed up parsing of
# pathelogically long literals, especially those over 1024.
# pathologically long literals, especially those over 1024.
build_count = token.attr
thousands = build_count // 1024
thirty32s = (build_count // 32) % 32
@@ -298,7 +298,7 @@ def customize_grammar_rules_lambda38(self, tokens, customize):
elif opname == "GET_AITER":
self.add_unique_doc_rules("get_aiter ::= expr GET_AITER", customize)

if not {"MAKE_FUNCTION_0", "MAKE_FUNCTION_CLOSURE"} in self.seen_ops:
if {"MAKE_FUNCTION_0", "MAKE_FUNCTION_CLOSURE"} not in self.seen_ops:
self.addRule(
"""
expr ::= dict_comp_async
@@ -568,13 +568,13 @@ def customize_grammar_rules_lambda38(self, tokens, customize):
# line 447:
# lambda i: lambda n, m=None, l=None: ...
# which has
# L. 447 0 LOAD_CONST (None, None)
# 2 LOAD_CLOSURE 'i'
# 4 LOAD_CLOSURE 'predicate'
# 6 BUILD_TUPLE_2 2
# 8 LOAD_LAMBDA '<code_object <lambda>>'
# 10 LOAD_STR '_tgrep_relation_action.<locals>.<lambda>.<locals>.<lambda>'
# 12 MAKE_FUNCTION_CLOSURE_POS 'default, closure'
# L. 447 0 LOAD_CONST (None, None)
# 2 LOAD_CLOSURE 'i'
# 4 LOAD_CLOSURE 'predicate'
# 6 BUILD_TUPLE_2 2
# 8 LOAD_LAMBDA '<code_object <lambda>>'
# 10 LOAD_STR '_tgrep_relation_action.<locals>...'
# 12 MAKE_FUNCTION_CLOSURE_POS 'default, closure'
# FIXME: Possibly we need to generalize for more nested lambda's of lambda's?
rule = """
expr ::= lambda_body
@@ -670,8 +670,9 @@ def custom_classfunc_rule(self, opname, token, customize, next_token):
rule = "call_kw36 ::= expr {values} LOAD_CONST {opname}".format(**locals())
self.add_unique_rule(rule, token.kind, token.attr, customize)
elif opname == "CALL_FUNCTION_EX_KW":
# Note that we don't add to customize token.kind here. Instead, the non-terminal
# names call_ex_kw... are is in semantic actions.
# Note that we don't add to customize token.kind here.
# Instead, the non-terminal names, "call_ex_kw"...
# are in semantic actions.
self.addRule(
"""expr ::= call_ex_kw4
call_ex_kw4 ::= expr
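A note on the hunk above at old line 221: it keeps a comment about speeding up the parse of pathologically long literals, and the surrounding context lines split build_count into 1024-element blocks and 32-element blocks. A minimal, standalone sketch of that arithmetic follows; the rest variable is not shown in the hunk and is added here only to close the identity, and the grammar-rule generation itself is omitted.

    # Decompose a collection-literal element count the way the context lines do,
    # then check that the pieces reassemble to the original count.
    build_count = 2100                    # hypothetical literal length
    thousands = build_count // 1024       # full 1024-element blocks
    thirty32s = (build_count // 32) % 32  # remaining 32-element blocks
    rest = build_count % 32               # leftover elements (not in the hunk)
    assert build_count == thousands * 1024 + thirty32s * 32 + rest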
31 changes: 16 additions & 15 deletions decompyle3/scanner.py
@@ -89,11 +89,10 @@ def __init__(self, version: tuple, show_asm=None, is_pypy=False):
if is_pypy:
v_str += "pypy"
exec(f"""from xdis.opcodes import {v_str}""")
exec("self.opc = %s" % v_str)
exec(f"self.opc = {v_str}")
else:
raise TypeError(
"%s is not a Python version I know about"
% version_tuple_to_str(version)
f"{version_tuple_to_str(version)} is not a Python version I know about"
)

self.opname = self.opc.opname
@@ -370,7 +369,7 @@ def inst_matches(self, start, end, instr, target=None, include_beyond_target=Fal
pass
pass
pass
if isinstance(inst, int) and inst.offset >= end:
if isinstance(inst.offset, int) and inst.offset >= end:
break
pass

@@ -450,6 +449,7 @@ def remove_extended_args(self, instructions):
new_instructions = []
last_was_extarg = False
n = len(instructions)
starts_line = False
for i, inst in enumerate(instructions):
if (
inst.opname == "EXTENDED_ARG"
@@ -514,8 +514,8 @@ def restrict_to_parent(self, target: int, parent) -> int:
target = parent["end"]
return target

def setTokenClass(self, tokenClass: Token) -> Token:
self.Token = tokenClass
def setTokenClass(self, token_class: Token) -> Token:
self.Token = token_class
return self.Token


@@ -538,44 +538,45 @@ def get_scanner(version: Union[str, tuple], is_pypy=False, show_asm=None) -> Sca
import importlib

if is_pypy:
scan = importlib.import_module("decompyle3.scanners.pypy%s" % v_str)
scan = importlib.import_module(f"decompyle3.scanners.pypy{v_str}")
else:
scan = importlib.import_module("decompyle3.scanners.scanner%s" % v_str)
scan = importlib.import_module(f"decompyle3.scanners.scanner{v_str}")
if False:
print(scan) # Avoid unused scan
except ImportError:
if is_pypy:
exec(
"import decompyle3.scanners.pypy%s as scan" % v_str,
f"import decompyle3.scanners.pypy{v_str} as scan",
locals(),
globals(),
)
else:
exec(
"import decompyle3.scanners.scanner%s as scan" % v_str,
f"import decompyle3.scanners.scanner{v_str} as scan",
locals(),
globals(),
)
if is_pypy:
scanner = eval(
"scan.ScannerPyPy%s(show_asm=show_asm)" % v_str, locals(), globals()
f"scan.ScannerPyPy{v_str}(show_asm=show_asm)", locals(), globals()
)
else:
scanner = eval(
"scan.Scanner%s(show_asm=show_asm)" % v_str, locals(), globals()
f"scan.Scanner{v_str}(show_asm=show_asm)", locals(), globals()
)
else:
raise RuntimeError(
f"Unsupported Python version, {version_tuple_to_str(version)}, for decompilation"
"Unsupported Python version, "
f"{version_tuple_to_str(version)}, for decompilation"
)
return scanner


if __name__ == "__main__":
import inspect

co = inspect.currentframe().f_code
my_co = inspect.currentframe().f_code
from xdis.version_info import PYTHON_VERSION_TRIPLE

scanner = get_scanner(PYTHON_VERSION_TRIPLE, IS_PYPY, True)
tokens, customize = scanner.ingest(co, {}, show_asm="after")
tokens, customize = scanner.ingest(my_co, {}, show_asm="after")
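Besides the %-formatting to f-string conversions, the hunk above at old line 370 changes isinstance(inst, int) to isinstance(inst.offset, int), so the type check applies to the instruction's offset rather than to the instruction object itself. A small illustration of the difference, using a hypothetical stand-in for the instruction record (the real instructions come from xdis and carry more fields):

    from collections import namedtuple

    # Hypothetical instruction record, only for illustration.
    Inst = namedtuple("Inst", ["opname", "offset"])

    inst = Inst("LOAD_CONST", 8)
    end = 4

    # Old test: an Inst is never an int, so this condition was always false
    # and the break it guards presumably never fired.
    print(isinstance(inst, int) and inst.offset >= end)         # False
    # New test: the offset is an int, so the loop can stop once it passes `end`.
    print(isinstance(inst.offset, int) and inst.offset >= end)  # True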
13 changes: 8 additions & 5 deletions decompyle3/scanners/scanner37.py
@@ -53,14 +53,17 @@ def ingest(
Some transformations are made to assist the deparsing grammar:
- various types of LOAD_CONST's are categorized in terms of what they load
- COME_FROM instructions are added to assist parsing control structures
- operands with stack argument counts or flag masks are appended to the opcode name, e.g.:
- operands with stack argument counts or flag masks are appended to the
opcode name, e.g.:
* BUILD_LIST, BUILD_SET
* MAKE_FUNCTION and FUNCTION_CALLS append the number of positional arguments
* MAKE_FUNCTION and FUNCTION_CALLS append the number of positional
arguments
- EXTENDED_ARGS instructions are removed
Also, when we encounter certain tokens, we add them to a set which will cause custom
grammar rules. Specifically, variable arg tokens like MAKE_FUNCTION or BUILD_LIST
cause specific rules for the specific number of arguments they take.
Also, when we encounter certain tokens, we add them to a set
which will cause custom grammar rules. Specifically, variable
arg tokens like MAKE_FUNCTION or BUILD_LIST cause specific rules
for the specific number of arguments they take.
"""
tokens, customize = Scanner37Base.ingest(
self, bytecode, classname, code_objects, show_asm
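The docstring in the hunk above says that variable-arg opcodes such as BUILD_LIST get their operand count appended to the opcode name so that custom grammar rules can be generated per arity. A rough, hypothetical sketch of that renaming idea (not the actual decompyle3 implementation):

    def specialize_opname(opname: str, arg: int) -> str:
        """Append the operand count to a variable-arity opcode name,
        e.g. BUILD_LIST with 3 operands becomes BUILD_LIST_3, so the
        grammar can carry a rule for exactly three expressions."""
        if opname in ("BUILD_LIST", "BUILD_SET", "BUILD_TUPLE", "CALL_FUNCTION"):
            return f"{opname}_{arg}"
        return opname

    assert specialize_opname("BUILD_LIST", 3) == "BUILD_LIST_3"
    assert specialize_opname("LOAD_CONST", 0) == "LOAD_CONST"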
4 changes: 2 additions & 2 deletions decompyle3/scanners/scanner37base.py
@@ -1081,9 +1081,9 @@ def next_except_jump(self, start):
if PYTHON_VERSION_TRIPLE[:2] >= (3, 7):
import inspect

co = inspect.currentframe().f_code # type: ignore
my_co = inspect.currentframe().f_code # type: ignore

my_tokens, customize = Scanner37Base(PYTHON_VERSION_TRIPLE).ingest(co)
my_tokens, customize = Scanner37Base(PYTHON_VERSION_TRIPLE).ingest(my_co)
for token in my_tokens:
print(token)
else:
