Merge pull request #30 from ISSMteam/add_update_param
add update parameter function
Cheng Gong authored May 20, 2024
2 parents b52c04b + faac2d3 commit 2bba080
Showing 3 changed files with 117 additions and 67 deletions.
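
The headline change factors the model-building steps of `PINN.__init__` into a new `setup()` method and adds `update_parameters()`, which merges new entries into the existing parameter dictionary and rebuilds the model. A minimal usage sketch, assuming the `pinn` import alias used in the tests and an `hp` hyperparameter dict like the one built in tests/test_pinn.py (not shown here); the keys are illustrative:

    import pinnicle as pinn

    experiment = pinn.PINN(params=hp)   # hp: a full hyperparameter dict, as in the tests
    # overwrite an existing key and add a new one; setup() is re-run internally,
    # so physics, data, and the network are rebuilt from the merged parameters
    experiment.update_parameters({"num_collocation_points": 200, "add_param": 1})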
94 changes: 56 additions & 38 deletions pinnicle/pinn.py
@@ -14,49 +14,14 @@ class PINN:
     """ a basic PINN model
     """
     def __init__(self, params={}, loadFrom=""):
-        # Step 1: load setup parameters
+        # load setup parameters
         if os.path.exists(loadFrom):
             # overwrite params with saved params.json file
             params = self.load_setting(path=loadFrom)
         self.params = Parameters(params)
 
-        # Step 2: set physics; all the remaining steps depend on which PDEs are included in the model
-        self.physics = Physics(self.params.physics)
-        # assign default physics.input_var, output_var, output_lb, and output_ub to nn
-        self._update_nn_parameters()
-
-        # Step 3: load all available data on the given domain and set up the training data
-        # domain of the model
-        self.domain = Domain(self.params.domain)
-        # create an instance of Data
-        #self.model_data = DataBase.create(self.params.data.source, parameters=self.params.data)
-        self.model_data = Data(self.params.data)
-        # load from data file
-        self.model_data.load_data()
-        # update according to the setup: data_size
-        self.model_data.prepare_training_data()
-
-        # Step 4: update training data
-        self.training_data, self.loss_names, self.params.training.loss_weights, self.params.training.loss_functions = self.update_training_data(self.model_data)
-
-        # Step 5: set up deepxde training data object using PDE + data
-        # deepxde data object
-        self.dde_data = dde.data.PDE(
-                self.domain.geometry,
-                self.physics.pdes,
-                self.training_data,  # all the data losses will be evaluated
-                num_domain=self.params.domain.num_collocation_points,  # collocation points
-                num_boundary=0,  # no need to set for data misfit, unless adding a calving front boundary, etc.
-                num_test=None)
-
-        # Step 6: set up neural networks
-        # automate the input scaling according to the domain; this step needs to be done before setting up the NN
-        self._update_ub_lb_in_nn(self.model_data)
-        # define the neural network in use
-        self.nn = FNN(self.params.nn)
-
-        # Step 7: set up the deepxde PINN model
-        self.model = dde.Model(self.dde_data, self.nn.net)
+        # set up the model according to self.params
+        self.setup()
 
     def check_path(self, path, loadOnly=False):
         """check the path, set to default, and create folder if needed
@@ -140,6 +105,47 @@ def save_setting(self, path=""):
         path = self.check_path(path)
         save_dict_to_json(self.params.param_dict, path, "params.json")
 
+    def setup(self):
+        """ set up the model according to `self.params` from the constructor
+        """
+        # Step 2: set physics; all the remaining steps depend on which PDEs are included in the model
+        self.physics = Physics(self.params.physics)
+        # assign default physics.input_var, output_var, output_lb, and output_ub to nn
+        self._update_nn_parameters()
+
+        # Step 3: load all available data on the given domain and set up the training data
+        # domain of the model
+        self.domain = Domain(self.params.domain)
+        # create an instance of Data
+        #self.model_data = DataBase.create(self.params.data.source, parameters=self.params.data)
+        self.model_data = Data(self.params.data)
+        # load from data file
+        self.model_data.load_data()
+        # update according to the setup: data_size
+        self.model_data.prepare_training_data()
+
+        # Step 4: update training data
+        self.training_data, self.loss_names, self.params.training.loss_weights, self.params.training.loss_functions = self.update_training_data(self.model_data)
+
+        # Step 5: set up deepxde training data object using PDE + data
+        # deepxde data object
+        self.dde_data = dde.data.PDE(
+                self.domain.geometry,
+                self.physics.pdes,
+                self.training_data,  # all the data losses will be evaluated
+                num_domain=self.params.domain.num_collocation_points,  # collocation points
+                num_boundary=0,  # no need to set for data misfit, unless adding a calving front boundary, etc.
+                num_test=None)
+
+        # Step 6: set up neural networks
+        # automate the input scaling according to the domain; this step needs to be done before setting up the NN
+        self._update_ub_lb_in_nn(self.model_data)
+        # define the neural network in use
+        self.nn = FNN(self.params.nn)
+
+        # Step 7: set up the deepxde PINN model
+        self.model = dde.Model(self.dde_data, self.nn.net)
+
     def train(self, iterations=0):
         """ train the model
         """
@@ -238,6 +244,18 @@ def update_training_data(self, training_data):
 
         return training_temp, loss_names, loss_weights, loss_functions
 
+    def update_parameters(self, params):
+        """ update self.params according to the input params: if a key already exists, update its value; if not, add the pair
+        """
+        # update self.params.param_dict from params
+        self.params.param_dict.update(params)
+
+        # call the constructor
+        self.params = Parameters(self.params.param_dict)
+
+        # set up the model
+        self.setup()
+
     def _update_nn_parameters(self):
         """ assign physics.input_var, output_var, output_lb, and output_ub to nn
         """
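`update_parameters` leans on plain `dict.update` semantics before reconstructing `Parameters` and re-running `setup()`: existing keys are overwritten, unknown keys are appended, and all other entries are left untouched. A standalone sketch of just the merge step, with illustrative keys taken from the tests:

    param_dict = {"epochs": 10, "num_collocation_points": 100}
    param_dict.update({"num_collocation_points": 200, "add_param": 1})
    assert param_dict == {"epochs": 10,
                          "num_collocation_points": 200,  # existing key overwritten
                          "add_param": 1}                  # new key appended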
21 changes: 21 additions & 0 deletions pyproject.toml
@@ -47,3 +47,24 @@ Documentation = "https://pinnicle.readthedocs.io"
 [tool.setuptools.packages.find]
 where = ["."]
 exclude = ["DATA", "Models", "docs*", "examples*"]
+
+
+[tool.coverage.report]
+# Regexes for lines to exclude from consideration
+exclude_also = [
+    # Don't complain about missing debug-only code:
+    "def __repr__",
+    "if self\\.debug",
+
+    # Don't complain if tests don't hit defensive assertion code:
+    "raise AssertionError",
+    "raise NotImplementedError",
+
+    # Don't complain about abstract methods, they aren't run:
+    "@(abc\\.)?abstractmethod",
+
+    # Don't complain about pytest.mark.skip:
+    "@pytest.mark.skip",
+]
+
+ignore_errors = true
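
The `exclude_also` entries are regexes matched against source lines; a matching line (and, when the line introduces a block, such as a `def` or a decorator, the whole block) is excluded from the coverage report instead of being counted as a miss. A hypothetical module illustrating what each pattern would exclude:

    import abc
    import pytest

    class Mesh(abc.ABC):
        def __repr__(self):                # excluded: matches "def __repr__"
            return "Mesh()"

        @abc.abstractmethod                # excluded: matches "@(abc\\.)?abstractmethod"
        def build(self):
            ...

    def solve(x):
        if x is None:
            raise NotImplementedError      # excluded: matches "raise NotImplementedError"
        return x

    @pytest.mark.skip                      # excluded: matches "@pytest.mark.skip"
    def test_solve():
        assert solve(1.0) == 1.0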
69 changes: 40 additions & 29 deletions tests/test_pinn.py
@@ -99,6 +99,15 @@ def test_save_and_load_setting(tmp_path):
     experiment2 = pinn.PINN(loadFrom=tmp_path)
     assert experiment.params.param_dict == experiment2.params.param_dict
 
+def test_update_parameters():
+    experiment = pinn.PINN(params=hp)
+    experiment.update_parameters({})
+    assert experiment.params.param_dict == hp
+    experiment.update_parameters({"add_param": 1})
+    assert experiment.params.param_dict["add_param"] == 1
+    experiment.update_parameters({"add_param": 2})
+    assert experiment.params.param_dict["add_param"] == 2
+
 def test_train_only_data(tmp_path):
     hp["is_parallel"] = False
     hp["is_save"] = False
@@ -140,35 +149,37 @@ def test_train_PFNN(tmp_path):
     assert len(experiment.model.net.layers) == 5*(2+1)
     assert len(experiment.model.net.trainable_weights) == 30
 
-#def test_save_train(tmp_path):
-#    hp["save_path"] = str(tmp_path)
-#    hp["is_save"] = True
-#    hp["num_collocation_points"] = 100
-#    issm["data_size"] = {"u":100, "v":100, "s":100, "H":100, "C":None, "vel":100}
-#    hp["data"] = {"ISSM": issm}
-#    experiment = pinn.PINN(params=hp)
-#    experiment.compile()
-#    experiment.train()
-#    assert experiment.loss_names == ['fSSA1', 'fSSA2', 'u', 'v', 's', 'H', 'C', "vel log"]
-#    assert os.path.isfile(f"{tmp_path}/pinn/model-{hp['epochs']}.ckpt.index")
-#
-#def test_train_with_callbacks(tmp_path):
-#    hp["save_path"] = str(tmp_path)
-#    hp["is_save"] = True
-#    hp["num_collocation_points"] = 100
-#    issm["data_size"] = {"u":100, "v":100, "s":100, "H":100, "C":None, "vel":100}
-#    hp["data"] = {"ISSM": issm}
-#    hp["min_delta"] = 1e10
-#    hp["period"] = 5
-#    hp["patience"] = 8
-#    hp["checkpoint"] = True
-#    experiment = pinn.PINN(params=hp)
-#    experiment.compile()
-#    experiment.train()
-#    assert experiment.loss_names == ['fSSA1', 'fSSA2', 'u', 'v', 's', 'H', 'C', "vel log"]
-#    assert os.path.isfile(f"{tmp_path}/pinn/model-1.ckpt.index")
-#    assert os.path.isfile(f"{tmp_path}/pinn/model-9.ckpt.index")
-#    assert not os.path.isfile(f"{tmp_path}/pinn/model-{hp['epochs']}.ckpt.index")
+@pytest.mark.skip(reason="[tf] change to h5 format")
+def test_save_train(tmp_path):
+    hp["save_path"] = str(tmp_path)
+    hp["is_save"] = True
+    hp["num_collocation_points"] = 100
+    issm["data_size"] = {"u":100, "v":100, "s":100, "H":100, "C":None, "vel":100}
+    hp["data"] = {"ISSM": issm}
+    experiment = pinn.PINN(params=hp)
+    experiment.compile()
+    experiment.train()
+    assert experiment.loss_names == ['fSSA1', 'fSSA2', 'u', 'v', 's', 'H', 'C', "vel log"]
+    assert os.path.isfile(f"{tmp_path}/pinn/model-{hp['epochs']}.ckpt.index")
+
+@pytest.mark.skip(reason="[tf] change to h5 format")
+def test_train_with_callbacks(tmp_path):
+    hp["save_path"] = str(tmp_path)
+    hp["is_save"] = True
+    hp["num_collocation_points"] = 100
+    issm["data_size"] = {"u":100, "v":100, "s":100, "H":100, "C":None, "vel":100}
+    hp["data"] = {"ISSM": issm}
+    hp["min_delta"] = 1e10
+    hp["period"] = 5
+    hp["patience"] = 8
+    hp["checkpoint"] = True
+    experiment = pinn.PINN(params=hp)
+    experiment.compile()
+    experiment.train()
+    assert experiment.loss_names == ['fSSA1', 'fSSA2', 'u', 'v', 's', 'H', 'C', "vel log"]
+    assert os.path.isfile(f"{tmp_path}/pinn/model-1.ckpt.index")
+    assert os.path.isfile(f"{tmp_path}/pinn/model-9.ckpt.index")
+    assert not os.path.isfile(f"{tmp_path}/pinn/model-{hp['epochs']}.ckpt.index")
 
 def test_only_callbacks(tmp_path):
     hp["save_path"] = str(tmp_path)
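
With `@pytest.mark.skip`, the two formerly commented-out tests are now collected but not executed, and `pytest -rs` prints the "[tf] change to h5 format" reason in the run summary. A minimal sketch of the marker's behavior, as a hypothetical test outside this suite:

    import pytest

    @pytest.mark.skip(reason="[tf] change to h5 format")
    def test_placeholder():
        assert False  # never runs; reported as skipped instead of failing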
