Skip to content

Commit

Permalink
Updated actions, removed flake8, fixed unused vars
Browse files Browse the repository at this point in the history
  • Loading branch information
Boris committed Apr 26, 2024
1 parent fbd22c1 commit 7a3e191
Show file tree
Hide file tree
Showing 20 changed files with 207 additions and 396 deletions.
4 changes: 1 addition & 3 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,11 @@ repos:
- id: flake8
args:
- --ignore=E203,W503
- --max-line-length=120
- --max-line-length=100
files: '\.py$'
additional_dependencies: [ flake8 ]




- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.4.1
hooks:
Expand Down
4 changes: 1 addition & 3 deletions import_specifications/clients/authclient.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,7 @@ def add_valid_token(self, token, user):
with self._lock:
self._cache[token] = [user, _time.time()]
if len(self._cache) > self._maxsize:
sorted_items = sorted(
list(self._cache.items()), key=(lambda v: v[1][1])
)
sorted_items = sorted(list(self._cache.items()), key=(lambda v: v[1][1]))
for i, (t, _) in enumerate(sorted_items):
if i <= self._halfmax:
del self._cache[t]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,9 +49,7 @@ def ver(self, context=None):
Returns the current running version of the NarrativeMethodStore.
:returns: instance of String
"""
return self._client.call_method(
"NarrativeMethodStore.ver", [], self._service_ver, context
)
return self._client.call_method("NarrativeMethodStore.ver", [], self._service_ver, context)

def status(self, context=None):
"""
Expand Down
8 changes: 2 additions & 6 deletions import_specifications/generate_import_template.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,7 @@


def parse_args():
parser = argparse.ArgumentParser(
description="Generate a bulk import template for an app"
)
parser = argparse.ArgumentParser(description="Generate a bulk import template for an app")
parser.add_argument(
"app_id",
help="The app ID to process, for example kb_uploadmethods/import_sra_as_reads_from_staging",
Expand Down Expand Up @@ -67,9 +65,7 @@ def is_file_input(param):
if param["field_type"] != "dynamic_dropdown":
return False
if "dynamic_dropdown_options" not in param:
raise ValueError(
"Missing dynamic_dropdown_options field for dynamic_dropdown input"
)
raise ValueError("Missing dynamic_dropdown_options field for dynamic_dropdown input")
return param["dynamic_dropdown_options"].get("data_source") == "ftp_staging"


Expand Down
77 changes: 77 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
[tool.ruff]
# Exclude a variety of commonly ignored directories.
exclude = [
".bzr",
".direnv",
".eggs",
".git",
".git-rewrite",
".hg",
".ipynb_checkpoints",
".mypy_cache",
".nox",
".pants.d",
".pyenv",
".pytest_cache",
".pytype",
".ruff_cache",
".svn",
".tox",
".venv",
".vscode",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"site-packages",
"venv",
]

# Same as Black.
line-length = 100
indent-width = 4

target-version = "py39"

[tool.ruff.lint]
# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or
# McCabe complexity (`C901`) by default.
select = ["E4", "E7", "E9", "F"]
ignore = []

# Allow fixes for all enabled rules (when `--fix` is provided).
fixable = ["ALL"]
unfixable = []

# Allow unused variables when underscore-prefixed.
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"

[tool.ruff.format]
# Like Black, use double quotes for strings.
quote-style = "double"

# Like Black, indent with spaces, rather than tabs.
indent-style = "space"

# Like Black, respect magic trailing commas.
skip-magic-trailing-comma = false

# Like Black, automatically detect the appropriate line ending.
line-ending = "auto"

# Enable auto-formatting of code examples in docstrings. Markdown,
# reStructuredText code/literal blocks and doctests are all supported.
#
# This is currently disabled by default, but it is planned for this
# to be opt-out in the future.
docstring-code-format = false

# Set the line length limit used when formatting code snippets in
# docstrings.
#
# This only has an effect when the `docstring-code-format` setting is
# enabled.
docstring-code-line-length = "dynamic"
12 changes: 3 additions & 9 deletions scripts/prune_acls.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,9 +34,7 @@

client = globus_sdk.NativeAppAuthClient(cf["client_id"])
try:
transfer_authorizer = globus_sdk.RefreshTokenAuthorizer(
cf["transfer_token"], client
)
transfer_authorizer = globus_sdk.RefreshTokenAuthorizer(cf["transfer_token"], client)
globus_transfer_client = globus_sdk.TransferClient(authorizer=transfer_authorizer)
auth_authorizer = globus_sdk.RefreshTokenAuthorizer(cf["auth_token"], client)
globus_auth_client = globus_sdk.AuthClient(authorizer=auth_authorizer)
Expand All @@ -54,9 +52,7 @@ def remove_directory(directory):
logging.info("About to delete {}".format(directory))
# shutil.rmtree(directory)
except OSError as error:
logging.error(
"Couldn't delete {} {} {}".format(directory, error.message, error.filename)
)
logging.error("Couldn't delete {} {} {}".format(directory, error.message, error.filename))


def remove_acl(acl):
Expand All @@ -81,9 +77,7 @@ def main():

old_acls = get_old_acls()

logging.info(
"{}:ATTEMPTING TO DELETE {} OLD ACLS".format(current_time, len(old_acls))
)
logging.info("{}:ATTEMPTING TO DELETE {} OLD ACLS".format(current_time, len(old_acls)))
for acl in old_acls:
remove_acl(acl.acl)
remove_directory(acl.dir)
Expand Down
72 changes: 17 additions & 55 deletions staging_service/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,13 +93,7 @@ def _file_type_resolver(path: PathPy) -> FileTypeResolution:
if ftype in _IMPSPEC_FILE_TO_PARSER:
return FileTypeResolution(parser=_IMPSPEC_FILE_TO_PARSER[ftype])
else:
ext = (
fi["suffix"]
if fi["suffix"]
else path.suffix[1:]
if path.suffix
else path.name
)
ext = fi["suffix"] if fi["suffix"] else path.suffix[1:] if path.suffix else path.name
return FileTypeResolution(unsupported_type=ext)


Expand All @@ -124,9 +118,7 @@ async def bulk_specification(request: web.Request) -> web.json_response:
res = parse_import_specifications(
tuple(list(paths)),
_file_type_resolver,
lambda e: logging.error(
"Unexpected error while parsing import specs", exc_info=e
),
lambda e: logging.error("Unexpected error while parsing import specs", exc_info=e),
)
if res.results:
types = {dt: result.result for dt, result in res.results.items()}
Expand Down Expand Up @@ -192,9 +184,7 @@ async def write_bulk_specification(request: web.Request) -> web.json_response:
folder = data.get("output_directory")
type_ = data.get("output_file_type")
if type(folder) != str: # noqa E721
return _createJSONErrorResponse(
"output_directory is required and must be a string"
)
return _createJSONErrorResponse("output_directory is required and must be a string")
writer = _IMPSPEC_FILE_TO_WRITER.get(type_)
if not writer:
return _createJSONErrorResponse(f"Invalid output_file_type: {type_}")
Expand All @@ -219,12 +209,8 @@ async def add_acl_concierge(request: web.Request):
user_dir = Path.validate_path(username).full_path
concierge_path = f"{Path._CONCIERGE_PATH}/{username}/"
aclm = AclManager()
result = aclm.add_acl_concierge(
shared_directory=user_dir, concierge_path=concierge_path
)
result["msg"] = (
f"Requesting Globus Perms for the following globus dir: {concierge_path}"
)
result = aclm.add_acl_concierge(shared_directory=user_dir, concierge_path=concierge_path)
result["msg"] = f"Requesting Globus Perms for the following globus dir: {concierge_path}"
result["link"] = (
f"https://app.globus.org/file-manager?destination_id={aclm.endpoint_id}&destination_path={concierge_path}"
)
Expand Down Expand Up @@ -298,9 +284,7 @@ async def list_files(request: web.Request):
username = await authorize_request(request)
path = Path.validate_path(username, request.match_info.get("path", ""))
if not os.path.exists(path.full_path):
raise web.HTTPNotFound(
text="path {path} does not exist".format(path=path.user_path)
)
raise web.HTTPNotFound(text="path {path} does not exist".format(path=path.user_path))
elif os.path.isfile(path.full_path):
raise web.HTTPBadRequest(
text="{path} is a file not a directory".format(path=path.full_path)
Expand All @@ -325,17 +309,13 @@ async def download_files(request: web.Request):
username = await authorize_request(request)
path = Path.validate_path(username, request.match_info.get("path", ""))
if not os.path.exists(path.full_path):
raise web.HTTPNotFound(
text="path {path} does not exist".format(path=path.user_path)
)
raise web.HTTPNotFound(text="path {path} does not exist".format(path=path.user_path))
elif not os.path.isfile(path.full_path):
raise web.HTTPBadRequest(
text="{path} is a directory not a file".format(path=path.full_path)
)
# hard coding the mime type to force download
return web.FileResponse(
path.full_path, headers={"content-type": "application/octet-stream"}
)
return web.FileResponse(path.full_path, headers={"content-type": "application/octet-stream"})

Check failure

Code scanning / CodeQL

Uncontrolled data used in path expression High

This path depends on a
user-provided value
.
This path depends on a
user-provided value
.


@routes.get("/similar/{path:.+}")
Expand All @@ -346,9 +326,7 @@ async def similar_files(request: web.Request):
username = await authorize_request(request)
path = Path.validate_path(username, request.match_info["path"])
if not os.path.exists(path.full_path):
raise web.HTTPNotFound(
text="path {path} does not exist".format(path=path.user_path)
)
raise web.HTTPNotFound(text="path {path} does not exist".format(path=path.user_path))
elif os.path.isdir(path.full_path):
raise web.HTTPBadRequest(
text="{path} is a directory not a file".format(path=path.full_path)
Expand Down Expand Up @@ -400,9 +378,7 @@ async def get_metadata(request: web.Request):
username = await authorize_request(request)
path = Path.validate_path(username, request.match_info["path"])
if not os.path.exists(path.full_path):
raise web.HTTPNotFound(
text="path {path} does not exist".format(path=path.user_path)
)
raise web.HTTPNotFound(text="path {path} does not exist".format(path=path.user_path))
return web.json_response(await some_metadata(path))


Expand Down Expand Up @@ -430,9 +406,7 @@ async def upload_files_chunked(request: web.Request):
counter = 0
user_file = None
destPath = None
while (
counter < 100
): # TODO this is arbitrary to keep an attacker from creating infinite loop
while counter < 100: # TODO this is arbitrary to keep an attacker from creating infinite loop
    # This loop handles the null parts that come in between destpath and file
part = await reader.next()

Expand Down Expand Up @@ -498,9 +472,7 @@ async def define_UPA(request: web.Request):
path = Path.validate_path(username, request.match_info["path"])
if not os.path.exists(path.full_path or not os.path.isfile(path.full_path)):
# TODO the security model here is to not care if someone wants to put in a false upa
raise web.HTTPNotFound(
text="no file found found on path {}".format(path.user_path)
)
raise web.HTTPNotFound(text="no file found found on path {}".format(path.user_path))
if not request.has_body:
raise web.HTTPBadRequest(text="must provide UPA field in body")
body = await request.post()
Expand All @@ -510,9 +482,7 @@ async def define_UPA(request: web.Request):
raise web.HTTPBadRequest(text="must provide UPA field in body")
await add_upa(path, UPA)
return web.Response(
text="succesfully updated UPA {UPA} for file {path}".format(
UPA=UPA, path=path.user_path
)
text="succesfully updated UPA {UPA} for file {path}".format(UPA=UPA, path=path.user_path)
)


Expand All @@ -537,9 +507,7 @@ async def delete(request: web.Request):
if os.path.exists(path.metadata_path):
shutil.rmtree(path.metadata_path)
else:
raise web.HTTPNotFound(
text="could not delete {path}".format(path=path.user_path)
)
raise web.HTTPNotFound(text="could not delete {path}".format(path=path.user_path))
return web.Response(text="successfully deleted {path}".format(path=path.user_path))


Expand Down Expand Up @@ -591,13 +559,9 @@ async def decompress(request: web.Request):
    # 2 could try again after doing an automatic rename scheme (add numbers to end)
# 3 just overwrite and force
destination = os.path.dirname(path.full_path)
if (
upper_file_extension == ".tar" and file_extension == ".gz"
) or file_extension == ".tgz":
if (upper_file_extension == ".tar" and file_extension == ".gz") or file_extension == ".tgz":
await run_command("tar", "xzf", path.full_path, "-C", destination)
elif upper_file_extension == ".tar" and (
file_extension == ".bz" or file_extension == ".bz2"
):
elif upper_file_extension == ".tar" and (file_extension == ".bz" or file_extension == ".bz2"):
await run_command("tar", "xjf", path.full_path, "-C", destination)
elif file_extension == ".zip" or file_extension == ".ZIP":
await run_command("unzip", path.full_path, "-d", destination)
Expand All @@ -608,9 +572,7 @@ async def decompress(request: web.Request):
elif file_extension == ".bz2" or file_extension == "bzip2":
await run_command("bzip2", "-d", path.full_path)
else:
raise web.HTTPBadRequest(
text="cannot decompress a {ext} file".format(ext=file_extension)
)
raise web.HTTPBadRequest(text="cannot decompress a {ext} file".format(ext=file_extension))
return web.Response(text="succesfully decompressed " + path.user_path)


Expand Down
10 changes: 2 additions & 8 deletions staging_service/app_error_formatter.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,11 +33,7 @@
"file": file1,
"tab": tab1,
},
ErrorType.MULTIPLE_SPECIFICATIONS_FOR_DATA_TYPE: lambda msg,
file1,
tab1,
file2,
tab2: {
ErrorType.MULTIPLE_SPECIFICATIONS_FOR_DATA_TYPE: lambda msg, file1, tab1, file2, tab2: {
"type": "multiple_specifications_for_data_type",
"message": msg,
"file_1": file1,
Expand Down Expand Up @@ -71,7 +67,5 @@ def format_import_spec_errors(
if e.source_2:
file2 = str(path_translations[e.source_2.file])
tab2 = e.source_2.tab
errs.append(
_IMPORT_SPEC_ERROR_FORMATTERS[e.error](e.message, file1, tab1, file2, tab2)
)
errs.append(_IMPORT_SPEC_ERROR_FORMATTERS[e.error](e.message, file1, tab1, file2, tab2))
return errs
8 changes: 2 additions & 6 deletions staging_service/auth2Client.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,7 @@ def add_valid_token(self, token, user, expire_time):
token = hashlib.sha256(token.encode("utf8")).hexdigest()
self._cache[token] = [user, _time.time(), expire_time]
if len(self._cache) > self._maxsize:
for i, (t, _) in enumerate(
sorted(self._cache.items(), key=lambda v: v[1][1])
):
for i, (t, _) in enumerate(sorted(self._cache.items(), key=lambda v: v[1][1])):
if i <= self._halfmax:
del self._cache[t]
else:
Expand All @@ -69,9 +67,7 @@ async def get_user(self, token):
if user:
return user
async with aiohttp.ClientSession() as session:
async with session.get(
self._authurl, headers={"Authorization": token}
) as resp:
async with session.get(self._authurl, headers={"Authorization": token}) as resp:
ret = await resp.json()
if not resp.reason == "OK":
raise aiohttp.web.HTTPUnauthorized(
Expand Down
Loading

0 comments on commit 7a3e191

Please sign in to comment.