Don't copy dataframes or use inplace=True #305

Merged: 5 commits, Jan 31, 2025
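
A minimal sketch of the pattern this PR applies throughout: assign the DataFrame returned by each operation back to the variable instead of mutating in place, and skip the defensive .copy() when nothing downstream mutates the frame. The frame below is a toy example, not code from the PR:

    import pandas as pd

    frame = pd.DataFrame({"a": [1, 1, 2], "b": [3, 3, 4]})

    # Old style: mutate in place
    #   frame.drop_duplicates(inplace=True)
    #   frame.reset_index(drop=True, inplace=True)

    # New style: reassign the returned frame (same result, and it chains)
    frame = frame.drop_duplicates().reset_index(drop=True)

The inplace form rarely saves a copy in practice and blocks method chaining, which is the usual motivation for the reassignment style.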
xl2times/__main__.py (3 additions, 4 deletions)

@@ -313,8 +313,7 @@ def keep_last_by_file_order(df):
         df = df.sort_values(by="file_order", kind="stable")
         df = df.drop(columns=["source_filename", "file_order"])
         df = df.drop_duplicates(keep="last")
-        df.reset_index(drop=True, inplace=True)
-        return df
+        return df.reset_index(drop=True)

     result = {}
     used_tables = set()
@@ -326,7 +325,7 @@ def keep_last_by_file_order(df):
             )
         else:
             used_tables.add(mapping.xl_name)
-            df = input[mapping.xl_name].copy()
+            df = input[mapping.xl_name]
             # Filter rows according to filter_rows mapping:
             for filter_col, filter_val in mapping.filter_rows.items():
                 if filter_col not in df.columns:
@@ -352,7 +351,7 @@ def keep_last_by_file_order(df):
             # Keep only the required columns
             cols_to_keep = set(mapping.times_cols).union({"source_filename"})
             cols_to_drop = [x for x in df.columns if x not in cols_to_keep]
-            df.drop(columns=cols_to_drop, inplace=True)
+            df = df.drop(columns=cols_to_drop)
             # Drop duplicates, keeping last seen rows as per file order
             df = keep_last_by_file_order(df)
             # Drop rows with missing values
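A side note on dropping .copy() above (df = input[mapping.xl_name]): the operations that follow in this file (drop, drop_duplicates, reset_index) are now all called without inplace=True, so each returns a new DataFrame and the table held in input is left untouched, which is why the defensive copy is no longer needed. A small illustrative check on toy data, not from the PR:

    import pandas as pd

    input_table = pd.DataFrame({"x": [1, 2, 2], "y": ["a", "b", "b"]})

    df = input_table                      # no .copy()
    df = df.drop(columns=["y"])           # returns a new frame
    df = df.drop_duplicates(keep="last")  # so does this

    assert input_table.shape == (3, 2)    # the original table is unchanged
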
xl2times/datatypes.py (2 additions, 2 deletions)

@@ -295,8 +295,8 @@ def veda_cgs(self) -> dict[tuple[str, str, str], str]:
         cols = ["region", "process", "commodity", "csets"]
         # Exclude auxillary flows
         index = self.topology["io"].isin({"IN", "OUT"})
-        veda_cgs = self.topology[cols + ["io"]][index].copy()
-        veda_cgs.drop_duplicates(subset=cols, keep="last", inplace=True)
+        veda_cgs = self.topology[cols + ["io"]][index]
+        veda_cgs = veda_cgs.drop_duplicates(subset=cols, keep="last")
         veda_cgs["veda_cg"] = veda_cgs["csets"] + veda_cgs["io"].str[:1]
         veda_cgs = veda_cgs.set_index(["region", "process", "commodity"])[
             "veda_cg"
xl2times/excel.py (1 addition, 1 deletion)

@@ -170,7 +170,7 @@ def extract_table(
     # Make all columns names strings as some are integers e.g. years
     table_df.columns = [str(x) for x in df.iloc[header_row, start_col:end_col]]

-    table_df.reset_index(drop=True, inplace=True)
+    table_df = table_df.reset_index(drop=True)

     # Don't use applymap because it can convert ints to floats
     # https://pandas.pydata.org/pandas-docs/stable/user_guide/gotchas.html#gotchas-intna