Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Optimised Abaqus interface #24

Merged
merged 15 commits into from
May 30, 2024
Merged
Show file tree
Hide file tree
Changes from 13 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 4 additions & 10 deletions piglot/solver/abaqus/fields.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
from typing import Dict, Any, List
import os
import re
import glob
import numpy as np
import pandas as pd
from piglot.parameter import ParameterSet
Expand Down Expand Up @@ -97,7 +96,7 @@ def check(self, parameters: ParameterSet) -> None:
"""
# Generate a dummy set of parameters (to ensure proper handling of output parameters)
values = np.array([parameter.inital_value for parameter in parameters])
param_dict = parameters.to_dict(values, input_normalised=False)
param_dict = parameters.to_dict(values)
for name in param_dict:
if not has_parameter(self.input_file, f'<{name}>'):
raise RuntimeError(f"Parameter '{name}' not found in input file.")
Expand All @@ -111,7 +110,8 @@ def check(self, parameters: ParameterSet) -> None:
self.job_name = self.__sanitize_field(self.job_name, job_list, "job")

instance_list = re.findall(r'\*Instance, name=([^,]+)', data)
self.instance_name = self.__sanitize_field(self.instance_name, instance_list,
self.instance_name = self.__sanitize_field(self.instance_name,
instance_list,
"instance")

step_list = re.findall(r'\*Step, name=([^,]+)', data)
Expand Down Expand Up @@ -180,7 +180,7 @@ def check(self, input_data: AbaqusInputData) -> None:
with open(input_file + ext, 'r', encoding='utf-8') as file:
data = file.read()

nsets_list = re.findall(r'\*Nset, nset=([^,]+)', data)
nsets_list = re.findall(r'\*Nset, nset="?([^",\s]+)"?', data)
if len(nsets_list) == 0:
raise ValueError("No sets found in the file.")
if self.set_name not in nsets_list:
Expand Down Expand Up @@ -232,12 +232,6 @@ def get(self, input_data: AbaqusInputData) -> OutputResult:
data_group = data[columns].to_numpy()
y_field = reduction[self.field](data_group, axis=1)

# Delete the extra temporary files
files = glob.glob(output_dir + '/' + input_file + '*.txt')
for file in files:
if self.set_name not in file:
os.remove(file)

tmnp19 marked this conversation as resolved.
Show resolved Hide resolved
return OutputResult(x_field, y_field)

@staticmethod
Expand Down
219 changes: 137 additions & 82 deletions piglot/solver/abaqus/reader.py
tmnp19 marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
Expand Up @@ -18,25 +18,17 @@ def input_variables():
args = sys.argv
variables = {}

input_file_list = [a for a in args if a.startswith("input_file=")]
job_name_list = [a for a in args if a.startswith("job_name=")]
step_name_list = [a for a in args if a.startswith("step_name=")]
instance_name_list = [a for a in args if a.startswith("instance_name=")]

# Checks if the input_file, job_name, step_name and instance_name are not empty
input_file = input_file_list[0].replace('input_file=', '') \
if input_file_list else None
job_name = job_name_list[0].replace('job_name=', '') \
if job_name_list else None
step_name = step_name_list[0].replace('step_name=', '') \
if step_name_list else None
instance_name = instance_name_list[0].replace('instance_name=', '') \
if instance_name_list else None

variables['input_file'] = input_file
variables['job_name'] = job_name
variables['step_name'] = step_name
variables['instance_name'] = instance_name
variable_names = ['input_file',
'job_name',
'step_name',
'instance_name',
'set_name',
'field',
'x_field']
tmnp19 marked this conversation as resolved.
Show resolved Hide resolved

for var_name in variable_names:
var_list = [a for a in args if a.startswith(var_name + "=")]
variables[var_name] = var_list[0].replace(var_name + '=', '') if var_list else None

return variables

Expand All @@ -62,13 +54,17 @@ def file_name_func(set_name, variable_name, inp_name):
return file_name


def field_location(i, output_variable, location):
"""It gets the node data of the specified node set.
def field_location(i, variables_array, output_variable, location):
"""It gets the node data of the specified node set. Create a variable that refers to the output
variable of the node set. If the field is S or E it extrapolates the data to the nodes, if the
field is U or RF the data is already on the nodes so it doesn't need extrapolation.

Parameters
----------
i : int
It is an int number, 0 or 1 in the case of the stresses and strains.
It is an int number that represents the index of the variables_array.
variables_array : list
It is a list that contains the field variables (S, U, RF, E or LE)
output_variable : str
        It's the output variable (S, U, RF, E or LE) of the nodes.
location : str
Expand All @@ -79,95 +75,154 @@ def field_location(i, output_variable, location):
location_output_variable
Location of the output variable.
"""
if i in (0, 1):
variable = variables_array[i]
if variable in ['S', 'E', 'LE']:
location_output_variable = output_variable.getSubset(region=location,
position=ELEMENT_NODAL)
else:
location_output_variable = output_variable.getSubset(region=location)
return location_output_variable


def main():
"""Main function of the reader.py
"""

variables = input_variables()

# Data defined by the user
job_name = variables["job_name"] # Replace with the actual job name
# Replace with the actual step name
step_name = variables["step_name"]
def get_nlgeom_setting(inp_name):
"""It verifies if the 'nlgeom' setting is set to 'YES' in the input file.

# Read the input file to check if the nlgeom setting is on or off
inp_name = variables["input_file"]
Parameters
----------
inp_name : str
The name of the input file.

Returns
-------
int
It returns 1 if the 'nlgeom' setting is set to 'YES' in the input file, otherwise it
returns 0.

Raises
------
ValueError
Raises an error if the 'nlgeom' setting is not found in the input file.
"""
with codecs.open(inp_name, 'r', encoding='utf-8') as input_file:
file_content = input_file.read()

# Use a regular expression to find the nlgeom setting
match = re.search(r'\*Step.*nlgeom=(\w+)', file_content)

# Check if the match is found and extract the value
if match:
nlgeom_setting = match.group(1)
nlgeom = 1 if nlgeom_setting.upper() == 'YES' else 0
else:
print("nlgeom setting not found in the input file.")
sys.exit(1) # Stop the script with an exit code
return 1 if nlgeom_setting.upper() == 'YES' else 0

if nlgeom == 0:
variables_array = np.array(["S", "E", "U", "RF"])
else:
variables_array = np.array(["S", "LE", "U", "RF"])
raise ValueError("'nlgeom' setting not found in the input file.")

# Open the output database
odb_name = job_name + ".odb"
odb = openOdb(path=odb_name)
def check_nlgeom(nlgeom, field, x_field):
"""Checks if the user is trying to extract the logaritmic strain ('LE') when the 'nlgeom' is
OFF.

# Create a variable that refers to the first step.
step = odb.steps[step_name]
Parameters
----------
nlgeom : int
It is an int number that represents the 'nlgeom' setting. If it is 1 the 'nlgeom' is ON,
if it is 0 it is OFF.
field : str
Name of the y-axis field variable.
x_field : str
Name of the x-axis field variable.

Raises
------
    ValueError
        Raises an error if the user is trying to extract the logarithmic strain ('LE') when
        'nlgeom' is OFF.
"""
if nlgeom == 0 and (x_field == 'LE' or field == 'LE'):
raise ValueError("'LE' is not allowed when nlgeom is OFF, use 'E' instead.")

for i, var in enumerate(variables_array):
def get_node_sets(instance_name, odb):
"""Gets the node sets of the instance. If the instance_name is None it gets the node sets of the
assembly. If the instance_name is not None it gets the node sets of the instance specified by
the user.

header_variable = "%s_%d"
variable = var
Parameters
----------
instance_name : str
Name of the instance.
odb : Odb
An instance of the Odb class from the Abaqus scripting interface, representing the output
database.

for set_name, location in odb.rootAssembly.nodeSets.items():
Returns
-------
list
List of the node sets of the instance.
"""
if instance_name is not None:
return odb.rootAssembly.instances[instance_name].nodeSets.items()

return odb.rootAssembly.nodeSets.items()

file_name = file_name_func(set_name, var, inp_name)
def write_output_file(i, variables_array, variable, step, location, file_name):
"""Writes the output file with the nodal data of the specified node set.

# Create a text file to save the output data
with codecs.open(file_name, 'w', encoding='utf-8') as output_file:
Parameters
----------
i : int
It is an int number that represents the index of the variables_array.
variables_array : list
It is a list that contains two field variables (S, U, RF, E or LE).
step : str
It is a string that represents the step of the output database.
location : str
It is a string that represents the location of the node set.
file_name : str
It is a string that represents the name of the output file.
"""
with codecs.open(file_name, 'w', encoding='utf-8') as output_file:
output_variable = step.frames[0].fieldOutputs[variable]
location_output_variable = field_location(i, variables_array, output_variable, location)
component_labels = output_variable.componentLabels
# Write the column headers dynamically based on the number of nodes and output
# variable components
header = "Frame " + " ".join("%s_%d" % (label, v.nodeLabel)
for v in location_output_variable.values
for label in component_labels) + "\n"
output_file.write(header)
for frame in step.frames:
output_variable = frame.fieldOutputs[variable]
location_output_variable = field_location(i,
variables_array,
output_variable,
location)
output_file.write("%d " % frame.frameId)
for v in location_output_variable.values:
output_file.write(" ".join("%.9f" % value for value in v.data))
output_file.write(" ")
output_file.write("\n")

output_variable = step.frames[0].fieldOutputs[variable]
def main():
"""Main function to extract the nodal data from the output database (.odb) file.
"""
variables = input_variables()

# Create a variable that refers to the output variable of the node set. If the
# field is S or E it extrapolates the data to the nodes, if the field is U or RF
# the data is already on the nodes so it doesn't need to be specified.
location_output_variable = field_location(i, output_variable, location)
instance_name = variables["instance_name"]
if instance_name is not None:
instance_name = variables["instance_name"].upper()

# Get the component labels
component_labels = output_variable.componentLabels
nlgeom = get_nlgeom_setting(variables["input_file"])
check_nlgeom(nlgeom, variables["field"], variables["x_field"])

# Write the column headers dynamically based on the number of nodes and
# output variable components
header = "Frame " + " ".join(header_variable % (label, v.nodeLabel) for v in
location_output_variable.values for label in
component_labels) + "\n"
output_file.write(header)
variables_array = np.array([variables["field"], variables["x_field"]])

for frame in step.frames:
output_variable = frame.fieldOutputs[variable]
# Open the output database
odb_name = variables["job_name"] + ".odb"
odb = openOdb(path=odb_name)

# Create a variable that refers to the output_variable of the node
# set in the current frame.
location_output_variable = field_location(i, output_variable, location)
# Create a variable that refers to the respective step
step = odb.steps[variables["step_name"]]

output_file.write("%d " % frame.frameId)
for v in location_output_variable.values:
output_file.write(" ".join("%.9f" % value for value in v.data))
output_file.write(" ")
output_file.write("\n")
for i, var in enumerate(variables_array):
node_sets = get_node_sets(instance_name, odb)
for set_name, location in node_sets:
if set_name == str(variables["set_name"]):
file_name = file_name_func(set_name, var, variables["input_file"])
write_output_file(i, variables_array, var, step, location, file_name)

odb.close()

Expand Down
24 changes: 17 additions & 7 deletions piglot/solver/abaqus/solver.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
import numpy as np
from piglot.parameter import ParameterSet
from piglot.solver.solver import Solver, Case, CaseResult, OutputField, OutputResult
from piglot.solver.abaqus.fields import abaqus_fields_reader, AbaqusInputData
from piglot.solver.abaqus.fields import abaqus_fields_reader, AbaqusInputData, FieldsOutput


class AbaqusSolver(Solver):
Expand Down Expand Up @@ -47,13 +47,19 @@ def __init__(
self.tmp_dir = tmp_dir
self.extra_args = extra_args

def _post_proc_variables(self, input_data: AbaqusInputData) -> Dict[str, Any]:
def _post_proc_variables(
self,
input_data: AbaqusInputData,
field_data: FieldsOutput
) -> Dict[str, Any]:
"""Generate the post-processing variables.

Parameters
----------
input_data : AbaqusInputData
Input data for the simulation.
field_data : FieldsOutput
Field data for the simulation.

Returns
-------
Expand All @@ -66,6 +72,9 @@ def _post_proc_variables(self, input_data: AbaqusInputData) -> Dict[str, Any]:
variables['job_name'] = input_data.job_name
variables['step_name'] = input_data.step_name
variables['instance_name'] = input_data.instance_name
variables['set_name'] = field_data.set_name
variables['field'] = field_data.field
variables['x_field'] = field_data.x_field

return variables

Expand Down Expand Up @@ -94,8 +103,7 @@ def _run_case(self, values: np.ndarray, case: Case, tmp_dir: str) -> CaseResult:
os.mkdir(tmp_dir)

# Copy input file replacing parameters by passed value
input_data = case.input_data.prepare(
values, self.parameters, tmp_dir=tmp_dir)
input_data = case.input_data.prepare(values, self.parameters, tmp_dir=tmp_dir)
input_file = input_data.input_file

# Run ABAQUS (we don't use high precision timers here to keep track of the start time)
Expand All @@ -116,19 +124,21 @@ def _run_case(self, values: np.ndarray, case: Case, tmp_dir: str) -> CaseResult:
check=False
)

variables = self._post_proc_variables(input_data)
variables = self._post_proc_variables(input_data, list(case.fields.values())[0])
python_script = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'reader.py')

run_odb = subprocess.run(
[self.abaqus_bin, 'viewer', f"noGUI={python_script}", "--",
f"input_file={variables['input_file']}", "--",
f"job_name={variables['job_name']}", "--",
f"step_name={variables['step_name']}", "--",
f"instance_name={variables['instance_name']}"],
f"instance_name={variables['instance_name']}", "--",
f"set_name={variables['set_name']}", "--",
f"field={variables['field']}", "--",
f"x_field={variables['x_field']}"],
cwd=tmp_dir,
shell=False,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
tmnp19 marked this conversation as resolved.
Show resolved Hide resolved
check=False
)
end_time = time.time()
Expand Down