Optimised Abaqus interface (#24)
- Optimised the way the field outputs are written and treated inside
piglot. Now piglot only writes the fields and set names that are
present in the YAML file, and automatically extrapolates to the nodes
the quantities that are computed at the integration points;
- Added errors to check for non-linear fields in linear simulations
(see the sketch below);
- Fixed some bugs in the handling of the selected instance in Abaqus;
- Generalised the reading of Abaqus input files.
tmnp19 authored May 30, 2024
1 parent 7f7f27a commit a4458fa
Showing 3 changed files with 163 additions and 99 deletions.
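The second bullet is implemented in reader.py (diff below) by parsing the nlgeom flag from the *Step line of the input file and rejecting 'LE' requests when it is off. A minimal standalone sketch of that logic, runnable outside Abaqus; the sample *Step line is illustrative and not taken from the commit:

import re

def get_nlgeom(file_content):
    # Return 1 if the *Step line enables nlgeom, 0 otherwise (mirrors get_nlgeom_setting).
    match = re.search(r'\*Step.*nlgeom=(\w+)', file_content)
    if match:
        return 1 if match.group(1).upper() == 'YES' else 0
    raise ValueError("'nlgeom' setting not found in the input file.")

def check_nlgeom(nlgeom, field, x_field):
    # Reject logarithmic strain ('LE') when the simulation is geometrically linear.
    if nlgeom == 0 and 'LE' in (field, x_field):
        raise ValueError("'LE' is not allowed when nlgeom is OFF, use 'E' instead.")

sample = "*Step, name=Step-1, nlgeom=NO\n*Static\n"
check_nlgeom(get_nlgeom(sample), field='LE', x_field='U')  # raises ValueError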
14 changes: 4 additions & 10 deletions piglot/solver/abaqus/fields.py
@@ -3,7 +3,6 @@
from typing import Dict, Any, List
import os
import re
import glob
import numpy as np
import pandas as pd
from piglot.parameter import ParameterSet
@@ -97,7 +96,7 @@ def check(self, parameters: ParameterSet) -> None:
"""
# Generate a dummy set of parameters (to ensure proper handling of output parameters)
values = np.array([parameter.inital_value for parameter in parameters])
param_dict = parameters.to_dict(values, input_normalised=False)
param_dict = parameters.to_dict(values)
for name in param_dict:
if not has_parameter(self.input_file, f'<{name}>'):
raise RuntimeError(f"Parameter '{name}' not found in input file.")
@@ -111,7 +110,8 @@ def check(self, parameters: ParameterSet) -> None:
self.job_name = self.__sanitize_field(self.job_name, job_list, "job")

instance_list = re.findall(r'\*Instance, name=([^,]+)', data)
self.instance_name = self.__sanitize_field(self.instance_name, instance_list,
self.instance_name = self.__sanitize_field(self.instance_name,
instance_list,
"instance")

step_list = re.findall(r'\*Step, name=([^,]+)', data)
@@ -180,7 +180,7 @@ def check(self, input_data: AbaqusInputData) -> None:
with open(input_file + ext, 'r', encoding='utf-8') as file:
data = file.read()

nsets_list = re.findall(r'\*Nset, nset=([^,]+)', data)
nsets_list = re.findall(r'\*Nset, nset="?([^",\s]+)"?', data)
if len(nsets_list) == 0:
raise ValueError("No sets found in the file.")
if self.set_name not in nsets_list:
@@ -232,12 +232,6 @@ def get(self, input_data: AbaqusInputData) -> OutputResult:
data_group = data[columns].to_numpy()
y_field = reduction[self.field](data_group, axis=1)

# Delete the extra temporary files
files = glob.glob(output_dir + '/' + input_file + '*.txt')
for file in files:
if self.set_name not in file:
os.remove(file)

return OutputResult(x_field, y_field)

@staticmethod
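For context on the set-name regex change in fields.py above: the new pattern also accepts set names wrapped in double quotes, which Abaqus can write for names containing spaces or other special characters. A small runnable comparison; the sample *Nset lines are illustrative, not taken from the commit:

import re

data = (
    '*Nset, nset=Set-1, instance=Part-1-1\n'
    '*Nset, nset="RP-SET", instance=Part-1-1\n'
)
# Old pattern: keeps the quotes as part of the captured name.
print(re.findall(r'\*Nset, nset=([^,]+)', data))         # ['Set-1', '"RP-SET"']
# New pattern: optional quotes are stripped from the captured name.
print(re.findall(r'\*Nset, nset="?([^",\s]+)"?', data))  # ['Set-1', 'RP-SET']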
225 changes: 142 additions & 83 deletions piglot/solver/abaqus/reader.py
@@ -1,4 +1,6 @@
"""Module to extract the nodal data from the output database (.odb) file"""
"""Module to extract the nodal data from the output database (.odb) file
Note: This script has older python syntax because it is used in Abaqus, which uses Python 2.7.
"""
import re
import os
import sys
@@ -18,25 +20,19 @@ def input_variables():
args = sys.argv
variables = {}

input_file_list = [a for a in args if a.startswith("input_file=")]
job_name_list = [a for a in args if a.startswith("job_name=")]
step_name_list = [a for a in args if a.startswith("step_name=")]
instance_name_list = [a for a in args if a.startswith("instance_name=")]

# Checks if the input_file, job_name, step_name and instance_name are not empty
input_file = input_file_list[0].replace('input_file=', '') \
if input_file_list else None
job_name = job_name_list[0].replace('job_name=', '') \
if job_name_list else None
step_name = step_name_list[0].replace('step_name=', '') \
if step_name_list else None
instance_name = instance_name_list[0].replace('instance_name=', '') \
if instance_name_list else None

variables['input_file'] = input_file
variables['job_name'] = job_name
variables['step_name'] = step_name
variables['instance_name'] = instance_name
variable_names = [
'input_file',
'job_name',
'step_name',
'instance_name',
'set_name',
'field',
'x_field',
]

for var_name in variable_names:
var_list = [a for a in args if a.startswith(var_name + "=")]
variables[var_name] = var_list[0].replace(var_name + '=', '') if var_list else None

return variables
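The refactored input_variables above treats every option as a key=value token on the command line, so new options such as set_name, field and x_field are picked up without extra boilerplate. A standalone sketch of the same parsing idea; the example argument values are illustrative, not the exact command piglot issues:

VARIABLE_NAMES = ['input_file', 'job_name', 'step_name', 'instance_name',
                  'set_name', 'field', 'x_field']

def parse_key_value_args(args):
    # Map each known key to its value, or None when the key is absent.
    variables = {}
    for name in VARIABLE_NAMES:
        matches = [a for a in args if a.startswith(name + '=')]
        variables[name] = matches[0].replace(name + '=', '') if matches else None
    return variables

argv = ['reader.py', 'input_file=sample.inp', 'job_name=Job-1', 'step_name=Step-1',
        'set_name=Set-1', 'field=S', 'x_field=U']
print(parse_key_value_args(argv))  # instance_name is None because it was not passed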

@@ -62,13 +58,17 @@ def file_name_func(set_name, variable_name, inp_name):
return file_name


def field_location(i, output_variable, location):
"""It gets the node data of the specified node set.
def field_location(i, variables_array, output_variable, location):
"""It gets the node data of the specified node set. Create a variable that refers to the output
variable of the node set. If the field is S or E it extrapolates the data to the nodes, if the
field is U or RF the data is already on the nodes so it doesn't need extrapolation.
Parameters
----------
i : int
It is an int number, 0 or 1 in the case of the stresses and strains.
It is an int number that represents the index of the variables_array.
variables_array : list
It is a list that contains the field variables (S, U, RF, E or LE)
output_variable : str
It's the output variable (S, U, RF, E or LE) of the nodes.
location : str
@@ -79,95 +79,154 @@
location_output_variable
Location of the output variable.
"""
if i in (0, 1):
variable = variables_array[i]
if variable in ['S', 'E', 'LE']:
location_output_variable = output_variable.getSubset(region=location,
position=ELEMENT_NODAL)
else:
location_output_variable = output_variable.getSubset(region=location)
return location_output_variable


def main():
"""Main function of the reader.py
"""

variables = input_variables()

# Data defined by the user
job_name = variables["job_name"] # Replace with the actual job name
# Replace with the actual step name
step_name = variables["step_name"]
def get_nlgeom_setting(inp_name):
"""It verifies if the 'nlgeom' setting is set to 'YES' in the input file.
# Read the input file to check if the nlgeom setting is on or off
inp_name = variables["input_file"]
Parameters
----------
inp_name : str
The name of the input file.
Returns
-------
int
It returns 1 if the 'nlgeom' setting is set to 'YES' in the input file, otherwise it
returns 0.
Raises
------
ValueError
Raises an error if the 'nlgeom' setting is not found in the input file.
"""
with codecs.open(inp_name, 'r', encoding='utf-8') as input_file:
file_content = input_file.read()

# Use a regular expression to find the nlgeom setting
match = re.search(r'\*Step.*nlgeom=(\w+)', file_content)

# Check if the match is found and extract the value
if match:
nlgeom_setting = match.group(1)
nlgeom = 1 if nlgeom_setting.upper() == 'YES' else 0
else:
print("nlgeom setting not found in the input file.")
sys.exit(1) # Stop the script with an exit code
return 1 if nlgeom_setting.upper() == 'YES' else 0

if nlgeom == 0:
variables_array = np.array(["S", "E", "U", "RF"])
else:
variables_array = np.array(["S", "LE", "U", "RF"])
raise ValueError("'nlgeom' setting not found in the input file.")

# Open the output database
odb_name = job_name + ".odb"
odb = openOdb(path=odb_name)
def check_nlgeom(nlgeom, field, x_field):
"""Checks if the user is trying to extract the logaritmic strain ('LE') when the 'nlgeom' is
OFF.
Parameters
----------
nlgeom : int
It is an int number that represents the 'nlgeom' setting. If it is 1 the 'nlgeom' is ON,
if it is 0 it is OFF.
field : str
Name of the y-axis field variable.
x_field : str
Name of the x-axis field variable.
Raises
------
ValueError
Raises an error if the user is trying to extract the logarithmic strain ('LE') when the
'nlgeom' is OFF.
"""
if nlgeom == 0 and (x_field == 'LE' or field == 'LE'):
raise ValueError("'LE' is not allowed when nlgeom is OFF, use 'E' instead.")

# Create a variable that refers to the first step.
step = odb.steps[step_name]
def get_node_sets(instance_name, odb):
"""Gets the node sets of the instance. If the instance_name is None it gets the node sets of the
assembly. If the instance_name is not None it gets the node sets of the instance specified by
the user.
for i, var in enumerate(variables_array):
Parameters
----------
instance_name : str
Name of the instance.
odb : Odb
An instance of the Odb class from the Abaqus scripting interface, representing the output
database.
header_variable = "%s_%d"
variable = var
Returns
-------
list
List of the node sets of the instance.
"""
if instance_name is not None:
return odb.rootAssembly.instances[instance_name].nodeSets.items()

for set_name, location in odb.rootAssembly.nodeSets.items():
return odb.rootAssembly.nodeSets.items()

file_name = file_name_func(set_name, var, inp_name)
def write_output_file(i, variables_array, variable, step, location, file_name):
"""Writes the output file with the nodal data of the specified node set.
# Create a text file to save the output data
with codecs.open(file_name, 'w', encoding='utf-8') as output_file:
Parameters
----------
i : int
It is an int number that represents the index of the variables_array.
variables_array : list
It is a list that contains two field variables (S, U, RF, E or LE).
step : str
It is a string that represents the step of the output database.
location : str
It is a string that represents the location of the node set.
file_name : str
It is a string that represents the name of the output file.
"""
with codecs.open(file_name, 'w', encoding='utf-8') as output_file:
output_variable = step.frames[0].fieldOutputs[variable]
location_output_variable = field_location(i, variables_array, output_variable, location)
component_labels = output_variable.componentLabels
# Write the column headers dynamically based on the number of nodes and output
# variable components
header = "Frame " + " ".join("%s_%d" % (label, v.nodeLabel)
for v in location_output_variable.values
for label in component_labels) + "\n"
output_file.write(header)
for frame in step.frames:
output_variable = frame.fieldOutputs[variable]
location_output_variable = field_location(i,
variables_array,
output_variable,
location)
output_file.write("%d " % frame.frameId)
for v in location_output_variable.values:
output_file.write(" ".join("%.9f" % value for value in v.data))
output_file.write(" ")
output_file.write("\n")

output_variable = step.frames[0].fieldOutputs[variable]
def main():
"""Main function to extract the nodal data from the output database (.odb) file.
"""
variables = input_variables()

# Create a variable that refers to the output variable of the node set. If the
# field is S or E it extrapolates the data to the nodes, if the field is U or RF
# the data is already on the nodes so it doesn't need to be specified.
location_output_variable = field_location(i, output_variable, location)
instance_name = variables["instance_name"]
if instance_name is not None:
instance_name = variables["instance_name"].upper()

# Get the component labels
component_labels = output_variable.componentLabels
nlgeom = get_nlgeom_setting(variables["input_file"])
check_nlgeom(nlgeom, variables["field"], variables["x_field"])

# Write the column headers dynamically based on the number of nodes and
# output variable components
header = "Frame " + " ".join(header_variable % (label, v.nodeLabel) for v in
location_output_variable.values for label in
component_labels) + "\n"
output_file.write(header)
variables_array = np.array([variables["field"], variables["x_field"]])

for frame in step.frames:
output_variable = frame.fieldOutputs[variable]
# Open the output database
odb_name = variables["job_name"] + ".odb"
odb = openOdb(path=odb_name)

# Create a variable that refers to the output_variable of the node
# set in the current frame.
location_output_variable = field_location(i, output_variable, location)
# Create a variable that refers to the respective step
step = odb.steps[variables["step_name"]]

output_file.write("%d " % frame.frameId)
for v in location_output_variable.values:
output_file.write(" ".join("%.9f" % value for value in v.data))
output_file.write(" ")
output_file.write("\n")
for i, var in enumerate(variables_array):
node_sets = get_node_sets(instance_name, odb)
for set_name, location in node_sets:
if set_name == str(variables["set_name"]):
file_name = file_name_func(set_name, var, variables["input_file"])
write_output_file(i, variables_array, var, step, location, file_name)

odb.close()

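For reference, write_output_file above produces a whitespace-delimited table per set and field: a Frame column followed by one column per component and node (header entries like S11_1, S11_2, ...), with one row per frame. A hedged sketch of reading such a file back with pandas, assuming the whitespace-delimited layout written by the script; the file name, column prefix and mean reduction are illustrative (piglot's fields.py performs its own reading and reduction):

import pandas as pd

# Read a reader.py output file back into a DataFrame (whitespace-delimited, as written above).
data = pd.read_csv('sample_S_Set-1.txt', sep=r'\s+')

# Columns follow the '<component label>_<node label>' convention from the header line,
# e.g. average the S11 component over the nodes of the set for every frame.
s11_columns = [c for c in data.columns if c.startswith('S11_')]
s11_mean = data[s11_columns].to_numpy().mean(axis=1)
print(data['Frame'].to_numpy(), s11_mean)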
[Diff of the third changed file not shown]
