batch_enhancement (#51)
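Summary: adds optional mini-batch training to PINN. The commit introduces a batch_size argument on the PINN constructor, a PinaDataset/SampleDataset pair in pina/utils.py with a label-preserving collate function, a LabelTensor.select override so per-sample slicing keeps column labels, and a reworked training loop that steps the optimizer once per batch; tests cover batched training on CPU and GPU.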
pina/label_tensor.py
@@ -106,6 +106,14 @@ class LabelTensor(torch.Tensor):
         new.data = tmp.data
         return new
 
+    def select(self, *args, **kwargs):
+        """
+        Performs Tensor selection. For more details, see :meth:`torch.Tensor.select`.
+        """
+        tmp = super().select(*args, **kwargs)
+        tmp._labels = self._labels
+        return tmp
+
     def extract(self, label_to_extract):
         """
         Extract the subset of the original tensor by returning all the columns
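The select override is what lets the batching code slice one sample out of a LabelTensor without losing its column labels (SampleDataset.__getitem__ below calls select(0, index)). A minimal sketch of the intended behaviour; the constructor call mirrors how LabelTensor is built elsewhere in PINA, so treat the exact signature as an assumption:

    import torch
    from pina.label_tensor import LabelTensor

    pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y'])
    row = pts.select(0, 3)           # behaves like torch.Tensor.select
    assert row.labels == ['x', 'y']  # labels survive, so row.extract(['x']) still works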
pina/pinn.py
@@ -3,7 +3,8 @@ import torch
 
 from .problem import AbstractProblem
 from .label_tensor import LabelTensor
-from .utils import merge_tensors
+from .utils import merge_tensors, PinaDataset
 
 
 torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732
 
@@ -16,17 +17,27 @@ class PINN(object):
                  optimizer=torch.optim.Adam,
                  lr=0.001,
                  regularizer=0.00001,
+                 batch_size=None,
                  dtype=torch.float32,
                  device='cpu',
                  error_norm='mse'):
         '''
         :param Problem problem: the formualation of the problem.
         :param torch.nn.Module model: the neural network model to use.
+        :param torch.optim optimizer: the neural network optimizer to use;
+            default is `torch.optim.Adam`.
         :param float lr: the learning rate; default is 0.001.
         :param float regularizer: the coefficient for L2 regularizer term.
         :param type dtype: the data type to use for the model. Valid option are
             `torch.float32` and `torch.float64` (`torch.float16` only on GPU);
             default is `torch.float64`.
+        :param string device: the device used for training; default 'cpu'
+            option include 'cuda' if cuda is available.
+        :param string/int error_norm: the loss function used as minimizer,
+            default mean square error 'mse'. If string options include mean
+            error 'me' and mean square error 'mse'. If int, the p-norm is
+            calculated where p is specifined by the int input.
+        :param int batch_size: batch size for the dataloader; default 5.
         '''
 
         if dtype == torch.float64:
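A minimal construction sketch showing the new keyword; batch_size=None keeps the old whole-dataset behaviour, while an int enables mini-batching. (Note the new docstring's "default 5" disagrees with the signature's batch_size=None.) The problem and model names below stand for the same Poisson/FeedForward pair used in the test file at the end of this diff:

    import torch
    from pina.pinn import PINN  # module path per this diff

    # problem and model as defined in the test file below (assumed in scope)
    pinn = PINN(problem, model, batch_size=6, dtype=torch.float32, device='cpu')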
@@ -59,6 +70,9 @@ class PINN(object):
         self.optimizer = optimizer(
             self.model.parameters(), lr=lr, weight_decay=regularizer)
 
+        self.batch_size = batch_size
+        self.data_set = PinaDataset(self)
+
     @property
     def problem(self):
         return self._problem
@@ -79,7 +93,7 @@ class PINN(object):
         :param vec torch.tensor: the tensor
         """
         if isinstance(self.error_norm, int):
-            return torch.linalg.vector_norm(vec, ord = self.error_norm, dtype=self.dytpe)
+            return torch.linalg.vector_norm(vec, ord=self.error_norm, dtype=self.dytpe)
         elif self.error_norm == 'mse':
             return torch.mean(vec.pow(2))
         elif self.error_norm == 'me':
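Only spacing changes here; note that self.dytpe (a likely typo for self.dtype) is carried over unchanged on both sides. For reference, the two error_norm conventions the method implements:

    import torch

    vec = torch.tensor([1.0, -2.0, 2.0])
    torch.mean(vec.pow(2))                # error_norm='mse'      -> tensor(3.)
    torch.linalg.vector_norm(vec, ord=2)  # error_norm=2 (p-norm) -> tensor(3.)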
@@ -92,14 +106,14 @@ class PINN(object):
         checkpoint = {
             'epoch': self.trained_epoch,
             'model_state': self.model.state_dict(),
-            'optimizer_state' : self.optimizer.state_dict(),
-            'optimizer_class' : self.optimizer.__class__,
-            'history' : self.history_loss,
-            'input_points_dict' : self.input_pts,
+            'optimizer_state': self.optimizer.state_dict(),
+            'optimizer_class': self.optimizer.__class__,
+            'history': self.history_loss,
+            'input_points_dict': self.input_pts,
         }
 
         # TODO save also architecture param?
-        #if isinstance(self.model, DeepFeedForward):
+        # if isinstance(self.model, DeepFeedForward):
         #    checkpoint['model_class'] = self.model.__class__
         #    checkpoint['model_structure'] = {
         #    }
@@ -110,7 +124,6 @@ class PINN(object):
         checkpoint = torch.load(filename)
         self.model.load_state_dict(checkpoint['model_state'])
 
-
         self.optimizer = checkpoint['optimizer_class'](self.model.parameters())
         self.optimizer.load_state_dict(checkpoint['optimizer_state'])
 
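The checkpoint stores the optimizer by class and by state, so loading rebuilds a fresh optimizer over the current model parameters before restoring its state. The torch-level round trip, using only the keys written in the hunk above:

    import torch

    torch.save(checkpoint, 'pina.checkpoint')   # what the save side persists
    checkpoint = torch.load('pina.checkpoint')  # what the load side reads back
    optimizer = checkpoint['optimizer_class'](model.parameters())
    optimizer.load_state_dict(checkpoint['optimizer_state'])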
@@ -121,6 +134,39 @@ class PINN(object):
 
         return self
 
+    def _create_dataloader(self):
+        """Private method for creating dataloader
+
+        :return: dataloader
+        :rtype: torch.utils.data.DataLoader
+        """
+        if self.batch_size is None:
+            return [self.input_pts]
+
+        def custom_collate(batch):
+            # extracting pts labels
+            _, pts = list(batch[0].items())[0]
+            labels = pts.labels
+            # calling default torch collate
+            collate_res = default_collate(batch)
+            # save collate result in dict
+            res = {}
+            for key, val in collate_res.items():
+                val.labels = labels
+                res[key] = val
+            return res
+
+        # creating dataset, list of dataset for each location
+        datasets = [MyDataSet(key, val)
+                    for key, val in self.input_pts.items()]
+        # creating dataloader
+        dataloaders = [DataLoader(dataset=dat,
+                                  batch_size=self.batch_size,
+                                  collate_fn=custom_collate)
+                       for dat in datasets]
+
+        return dict(zip(self.input_pts.keys(), dataloaders))
+
     def span_pts(self, *args, **kwargs):
         """
         >>> pinn.span_pts(n=10, mode='grid')
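Two observations on this hunk. First, this pinn.py copy of _create_dataloader references MyDataSet and DataLoader, neither of which appears in the imports shown in this diff for pina/pinn.py; train() goes through self.data_set.dataloader instead, so the working implementation is the one added to pina/utils.py below and this copy looks like dead code. Second, the custom collate exists because default_collate stacks the per-sample LabelTensors into a plain batch tensor, dropping the labels metadata; the wrapper re-attaches it. A sketch of that round trip, with pts the LabelTensor from the earlier sketch:

    from torch.utils.data import default_collate

    batch = [{'D': pts.select(0, 0)}, {'D': pts.select(0, 1)}]  # two samples from location 'D'
    collated = default_collate(batch)   # {'D': stacked tensor}; .labels is lost here
    collated['D'].labels = pts.labels   # what custom_collate restores for every key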
@@ -160,55 +206,65 @@ class PINN(object):
 
             # TODO
             # pts = pts.double()
-            pts = pts.to(dtype=self.dtype, device=self.device)
-            pts.requires_grad_(True)
-            pts.retain_grad()
 
             self.input_pts[location] = pts
 
     def train(self, stop=100, frequency_print=2, save_loss=1, trial=None):
 
         epoch = 0
+        data_loader = self.data_set.dataloader
 
         header = []
         for condition_name in self.problem.conditions:
             condition = self.problem.conditions[condition_name]
 
-            if (hasattr(condition, 'function') and
-                    isinstance(condition.function, list)):
+            if hasattr(condition, 'function'):
+                if isinstance(condition.function, list):
                     for function in condition.function:
                         header.append(f'{condition_name}{function.__name__}')
-            else:
+                    continue
 
             header.append(f'{condition_name}')
 
         while True:
 
             losses = []
 
             for condition_name in self.problem.conditions:
                 condition = self.problem.conditions[condition_name]
 
+                for batch in data_loader[condition_name]:
+
+                    single_loss = []
+
                     if hasattr(condition, 'function'):
-                        pts = self.input_pts[condition_name]
+                        pts = batch[condition_name]
+                        pts = pts.to(dtype=self.dtype, device=self.device)
+                        pts.requires_grad_(True)
+                        pts.retain_grad()
+
                         predicted = self.model(pts)
                         for function in condition.function:
                             residuals = function(pts, predicted)
                             local_loss = (
                                 condition.data_weight*self._compute_norm(
                                     residuals))
-                            losses.append(local_loss)
+                            single_loss.append(local_loss)
                     elif hasattr(condition, 'output_points'):
-                        pts = condition.input_points
+                        pts = condition.input_points.to(
+                            dtype=self.dtype, device=self.device)
                         predicted = self.model(pts)
                         residuals = predicted - condition.output_points
                         local_loss = (
                             condition.data_weight*self._compute_norm(residuals))
-                        losses.append(local_loss)
+                        single_loss.append(local_loss)
 
                     self.optimizer.zero_grad()
-                    sum(losses).backward()
+                    sum(single_loss).backward()
                     self.optimizer.step()
 
+                    losses.append(sum(single_loss))
+
             if save_loss and (epoch % save_loss == 0 or epoch == 0):
                 self.history_loss[epoch] = [
                     loss.detach().item() for loss in losses]
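The net effect on training semantics: the optimizer now steps once per batch instead of once per epoch, and losses collects one summed entry per batch, so with batching enabled its length no longer matches the per-condition header columns printed later. Schematically (not the literal source):

    for condition_name in self.problem.conditions:
        for batch in data_loader[condition_name]:  # one optimizer step per batch
            single_loss = [...]                    # loss terms for this batch only
            self.optimizer.zero_grad()
            sum(single_loss).backward()
            self.optimizer.step()
            losses.append(sum(single_loss))        # one history entry per batch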
@@ -221,7 +277,8 @@ class PINN(object):
 
             if isinstance(stop, int):
                 if epoch == stop:
-                    print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
+                    print('[epoch {:05d}] {:.6e} '.format(
+                        self.trained_epoch, sum(losses).item()), end='')
                     for loss in losses:
                         print('{:.6e} '.format(loss.item()), end='')
                     print()
@@ -236,7 +293,8 @@ class PINN(object):
                 print('{:12.12s} '.format(name), end='')
             print()
 
-            print('[epoch {:05d}] {:.6e} '.format(self.trained_epoch, sum(losses).item()), end='')
+            print('[epoch {:05d}] {:.6e} '.format(
+                self.trained_epoch, sum(losses).item()), end='')
             for loss in losses:
                 print('{:.6e} '.format(loss.item()), end='')
             print()
@@ -246,7 +304,6 @@ class PINN(object):
 
         return sum(losses).item()
 
-
     def error(self, dtype='l2', res=100):
 
         import numpy as np
@@ -261,7 +318,8 @@ class PINN(object):
         grids_container = self.problem.data_solution['grid']
         Z_true = self.problem.data_solution['grid_solution']
         try:
-            unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(dtype=self.dtype, device=self.device)
+            unrolled_pts = torch.tensor([t.flatten() for t in grids_container]).T.to(
+                dtype=self.dtype, device=self.device)
             Z_pred = self.model(unrolled_pts)
             Z_pred = Z_pred.detach().numpy().reshape(grids_container[0].shape)
 
@@ -273,4 +331,5 @@ class PINN(object):
         except:
             print("")
             print("Something went wrong...")
-            print("Not able to compute the error. Please pass a data solution or a true solution")
+            print(
+                "Not able to compute the error. Please pass a data solution or a true solution")
pina/utils.py
@@ -1,10 +1,12 @@
 """Utils module"""
 from functools import reduce
+import torch
+from torch.utils.data import DataLoader, default_collate, ConcatDataset
 
 from .label_tensor import LabelTensor
 
 
-def number_parameters(model, aggregate=True, only_trainable=True): #TODO: check
+def number_parameters(model, aggregate=True, only_trainable=True): # TODO: check
     """
     Return the number of parameters of a given `model`.
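Of the imports added here, DataLoader and default_collate are used by PinaDataset below; ConcatDataset is imported but never referenced anywhere in this commit.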
@@ -45,3 +47,65 @@ def merge_two_tensors(tensor1, tensor2):
     tensor2 = LabelTensor(tensor2.repeat_interleave(n1, dim=0),
                           labels=tensor2.labels)
     return tensor1.append(tensor2)
+
+
+class PinaDataset():
+
+    def __init__(self, pinn) -> None:
+        self.pinn = pinn
+
+    @property
+    def dataloader(self):
+        return self._create_dataloader()
+
+    @property
+    def dataset(self):
+        return [self.SampleDataset(key, val)
+                for key, val in self.input_pts.items()]
+
+    def _create_dataloader(self):
+        """Private method for creating dataloader
+
+        :return: dataloader
+        :rtype: torch.utils.data.DataLoader
+        """
+        if self.pinn.batch_size is None:
+            return {key: [{key: val}] for key, val in self.pinn.input_pts.items()}
+
+        def custom_collate(batch):
+            # extracting pts labels
+            _, pts = list(batch[0].items())[0]
+            labels = pts.labels
+            # calling default torch collate
+            collate_res = default_collate(batch)
+            # save collate result in dict
+            res = {}
+            for key, val in collate_res.items():
+                val.labels = labels
+                res[key] = val
+            return res
+
+        # creating dataset, list of dataset for each location
+        datasets = [self.SampleDataset(key, val)
+                    for key, val in self.pinn.input_pts.items()]
+        # creating dataloader
+        dataloaders = [DataLoader(dataset=dat,
+                                  batch_size=self.pinn.batch_size,
+                                  collate_fn=custom_collate)
+                       for dat in datasets]
+
+        return dict(zip(self.pinn.input_pts.keys(), dataloaders))
+
+    class SampleDataset(torch.utils.data.Dataset):
+
+        def __init__(self, location, tensor):
+            self._tensor = tensor
+            self._location = location
+            self._len = len(tensor)
+
+        def __getitem__(self, index):
+            tensor = self._tensor.select(0, index)
+            return {self._location: tensor}
+
+        def __len__(self):
+            return self._len
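How the pieces fit together from PINN.train: dataloader returns a dict mapping each sampled location to a DataLoader (or, when batch_size is None, to a one-element list that mimics a single full batch). A usage sketch, assuming a pinn whose points were already sampled via span_pts; note also that the dataset property reads self.input_pts, which does not exist on PinaDataset (the points live on self.pinn), so it likely should be self.pinn.input_pts:

    data_set = PinaDataset(pinn)
    loaders = data_set.dataloader   # e.g. {'D': DataLoader, 'gamma1': DataLoader, ...}
    for batch in loaders['D']:
        pts = batch['D']            # LabelTensor; labels restored by custom_collate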
tests/test_pinn.py
@@ -31,19 +31,22 @@ class Poisson(SpatialProblem):
 
     def poisson_sol(self, pts):
         return -(
-            torch.sin(pts.extract(['x'])*torch.pi)*
+            torch.sin(pts.extract(['x'])*torch.pi) *
             torch.sin(pts.extract(['y'])*torch.pi)
         )/(2*torch.pi**2)
 
     truth_solution = poisson_sol
 
+
 problem = Poisson()
+
 model = FeedForward(problem.input_variables, problem.output_variables)
 
+
 def test_constructor():
     PINN(problem, model)
 
 
 def test_span_pts():
     pinn = PINN(problem, model)
     n = 10
@@ -60,6 +63,7 @@ def test_span_pts():
     pinn.span_pts(n, 'random', locations=['D'])
     assert pinn.input_pts['D'].shape[0] == n
 
+
 def test_train():
     pinn = PINN(problem, model)
     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
@@ -68,6 +72,7 @@ def test_train():
     pinn.span_pts(n, 'grid', locations=['D'])
     pinn.train(5)
 
+
 def test_train():
     boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
     n = 10
@@ -79,3 +84,44 @@ def test_train():
         pinn.span_pts(n, 'grid', locations=['D'])
         pinn.train(50, save_loss=i)
         assert list(pinn.history_loss.keys()) == truth_key
+
+
+def test_train_batch():
+    pinn = PINN(problem, model, batch_size=6)
+    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
+    n = 10
+    pinn.span_pts(n, 'grid', boundaries)
+    pinn.span_pts(n, 'grid', locations=['D'])
+    pinn.train(5)
+
+
+def test_train_batch():
+    boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
+    n = 10
+    expected_keys = [[], list(range(0, 50, 3))]
+    param = [0, 3]
+    for i, truth_key in zip(param, expected_keys):
+        pinn = PINN(problem, model, batch_size=6)
+        pinn.span_pts(n, 'grid', boundaries)
+        pinn.span_pts(n, 'grid', locations=['D'])
+        pinn.train(50, save_loss=i)
+        assert list(pinn.history_loss.keys()) == truth_key
+
+
+if torch.cuda.is_available():
+
+    def test_gpu_train():
+        pinn = PINN(problem, model, batch_size=20, device='cuda')
+        boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
+        n = 100
+        pinn.span_pts(n, 'grid', boundaries)
+        pinn.span_pts(n, 'grid', locations=['D'])
+        pinn.train(5)
+
+    def test_gpu_train_nobatch():
+        pinn = PINN(problem, model, batch_size=None, device='cuda')
+        boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4']
+        n = 100
+        pinn.span_pts(n, 'grid', boundaries)
+        pinn.span_pts(n, 'grid', locations=['D'])
+        pinn.train(5)
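Note on the tests: the file now defines test_train twice and test_train_batch twice; because a later def rebinds the module attribute, pytest collects only the last definition of each name, so the first of each pair is shadowed and never runs. The GPU tests are defined only when torch.cuda.is_available() is true, matching the device='cuda' option documented in the new PINN docstring.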