Fixed a bug related to the removal of data.
Because the data could also be used in a parallel state, we have to specify the validate flag and wrap the call in a `try`-`except` block.
philip-paul-mueller committed Dec 4, 2024
1 parent 763e0c0 commit 540c7a2
Showing 1 changed file with 20 additions and 3 deletions.
@@ -366,7 +366,12 @@ def apply(
         if not is_tmp_used_downstream:
             graph.remove_node(read_g)
             graph.remove_node(tmp_node)
-            sdfg.remove_data(tmp_node.data)
+            # It could still be used in a parallel branch.
+            try:
+                sdfg.remove_data(tmp_node.data, validate=True)
+            except ValueError as e:
+                if not str(e).startswith(f"Cannot remove data descriptor {tmp_node.data}:"):
+                    raise


 AccessLocation: TypeAlias = tuple[dace.SDFGState, dace_nodes.AccessNode]
@@ -750,7 +755,12 @@ def apply(
             nodes_to_ignore={access_node},
         ):
             graph.remove_nodes_from([tasklet, access_node])
-            sdfg.remove_data(access_node.data, validate=True)
+            # Needed if data is accessed in a parallel branch.
+            try:
+                sdfg.remove_data(access_node.data, validate=True)
+            except ValueError as e:
+                if not str(e).startswith(f"Cannot remove data descriptor {access_node.data}:"):
+                    raise

     def _modify_downstream_memlets(
         self,
@@ -976,7 +986,14 @@ def apply(
         graph.remove_edge(map_to_tmp_edge)
         graph.remove_edge(tmp_to_glob_edge)
         graph.remove_node(tmp_ac)
-        sdfg.remove_data(tmp_ac.data, validate=False)
+
+        # We cannot unconditionally remove the data `tmp` refers to, because
+        # `tmp` could also be defined in a parallel branch.
+        try:
+            sdfg.remove_data(tmp_ac.data, validate=True)
+        except ValueError as e:
+            if not str(e).startswith(f"Cannot remove data descriptor {tmp_ac.data}:"):
+                raise

         # Now we must modify the memlets inside the map scope, because
         # they now write into `G` instead of `tmp`, which has a different
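For reference, the guarded removal applied in all three hunks follows one pattern, sketched below as a standalone helper. This is a minimal sketch, not code from the commit: `remove_data_if_unused` is a hypothetical name, and it assumes that DaCe's `SDFG.remove_data(name, validate=True)` raises a `ValueError` whose message starts with `Cannot remove data descriptor <name>:` when the descriptor is still referenced, as the diff indicates.

import dace

def remove_data_if_unused(sdfg: dace.SDFG, name: str) -> None:
    # Hypothetical helper illustrating the pattern from this commit.
    try:
        # With `validate=True`, DaCe refuses to remove a descriptor that is
        # still referenced, e.g. by an AccessNode in a parallel branch.
        sdfg.remove_data(name, validate=True)
    except ValueError as e:
        # The data is still in use somewhere: keep the descriptor and swallow
        # only this specific validation error; re-raise anything else.
        if not str(e).startswith(f"Cannot remove data descriptor {name}:"):
            raise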
