#!/usr/bin/env python
# coding: utf-8

# # Introductory Tutorial: Physics-Informed Neural Networks with PINA
# [](https://colab.research.google.com/github/mathLab/PINA/blob/master/tutorials/tutorial1/tutorial.ipynb)
#
# > ##### ⚠️ ***Before starting:***
# > We assume you are already familiar with the concepts covered in the [Getting started with PINA](https://mathlab.github.io/PINA/_tutorial.html#getting-started-with-pina) tutorials. If not, we strongly recommend reviewing them before exploring this advanced topic.
#

# In this tutorial, we will demonstrate a typical use case of **PINA** for Physics-Informed Neural Network (PINN) training. We will cover the basics of training a PINN with PINA; if you want to go deeper into PINNs, have a look at our dedicated [tutorials](https://mathlab.github.io/PINA/_tutorial.html#physics-informed-neural-networks) on the topic.
#
# Let's start by importing the useful modules:

# In[ ]:


## routine needed to run the notebook on Google Colab
try:
    import google.colab

    IN_COLAB = True
except ImportError:
    IN_COLAB = False
if IN_COLAB:
    get_ipython().system('pip install "pina-mathlab[tutorial]"')

import warnings
import torch
import matplotlib.pyplot as plt

from pina import Trainer, Condition
from pina.problem import SpatialProblem
from pina.operator import grad
from pina.solver import PINN
from pina.model import FeedForward
from pina.optim import TorchOptimizer
from pina.domain import CartesianDomain
from pina.callback import MetricTracker
from pina.equation import Equation, FixedValue

warnings.filterwarnings("ignore")


# ## Build the problem
#
# We will use a simple Ordinary Differential Equation as a pedagogical example:
#
# $$
# \begin{equation}
# \begin{cases}
# \frac{d}{dx}u(x) &= u(x), \quad x\in(0,1)\\
# u(x=0) &= 1 \\
# \end{cases}
# \end{equation}
# $$
#
# with the analytical solution $u(x) = e^x$; indeed, $\frac{d}{dx}e^x = e^x$ and $e^0 = 1$.
#
# The PINA problem is easily written as:

# In[2]:


def ode_equation(input_, output_):
    # compute the derivative du/dx of the network output u
    u_x = grad(output_, input_, components=["u"], d=["x"])
    # extract the u component of the output
    u = output_.extract(["u"])
    # residual of the ODE: du/dx - u = 0
    return u_x - u


class SimpleODE(SpatialProblem):

    output_variables = ["u"]
    spatial_domain = CartesianDomain({"x": [0, 1]})

    # domains where the conditions are enforced
    domains = {
        "x0": CartesianDomain({"x": 0.0}),
        "D": CartesianDomain({"x": [0, 1]}),
    }

    # boundary condition u(x=0) = 1 and physics residual on D
    conditions = {
        "bound_cond": Condition(domain="x0", equation=FixedValue(1.0)),
        "phys_cond": Condition(domain="D", equation=Equation(ode_equation)),
    }

    # analytical solution, used later for comparison
    def solution(self, pts):
        return torch.exp(pts.extract(["x"]))


problem = SimpleODE()
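
# As a side note, the `FixedValue(1.0)` shortcut above is equivalent to writing
# the boundary residual by hand with a custom `Equation`. A minimal sketch
# (the `initial_condition` name is ours, not part of PINA):


def initial_condition(input_, output_):
    # residual of the boundary condition: u(x=0) - 1 = 0
    return output_.extract(["u"]) - 1.0


# Condition(domain="x0", equation=Equation(initial_condition))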


# ## Generate data
#
# Data for training can come in the form of direct numerical simulation results, or of points in the domains. If we perform unsupervised learning, we just need the collocation points for training, i.e. the points where we want to evaluate the neural network. Sampling points in **PINA** is very easy; here we show three examples using the `.discretise_domain` method of the `AbstractProblem` class.

# In[4]:


# sampling 20 points in [0, 1] with a grid in all domains
problem.discretise_domain(n=20, mode="grid", domains="all")

# sampling 20 points in (0, 1) through latin hypercube sampling in D, and 1 point in x0
problem.discretise_domain(n=20, mode="latin", domains=["D"])
problem.discretise_domain(n=1, mode="random", domains=["x0"])

# sampling 20 points in (0, 1) randomly
problem.discretise_domain(n=20, mode="random")
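
# A quick sanity check (a sketch): `problem.input_pts` maps each domain name to
# its sampled points (it is also used for plotting below), so we can inspect
# how many points each domain holds.
for name, pts in problem.input_pts.items():
    print(name, pts.shape)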


# We are going to use Latin hypercube sampling for training. We need to sample in all the domains referenced by the conditions; in our case `D` and `x0` (for the single boundary point in `x0`, the sampling mode makes no difference).

# In[5]:


# sampling for training
problem.discretise_domain(1, "random", domains=["x0"])
problem.discretise_domain(20, "lh", domains=["D"])


# To visualize the sampled points we can use `matplotlib.pyplot`:

# In[6]:


for location in problem.input_pts:
    coords = (
        problem.input_pts[location].extract(problem.spatial_variables).flatten()
    )
    plt.scatter(coords, torch.zeros_like(coords), s=10, label=location)
_ = plt.legend()


# ## Easily solve a Physics Problem with a three-step pipeline

# Once the problem is defined and the data is generated, we can move on to modeling. This process consists of three key steps:
#
# **Choosing a Model**
# - Select a neural network architecture. You can use the models we provide in the `pina.model` module (see [here](https://mathlab.github.io/PINA/_rst/_code.html#models) for a full list), or define a custom PyTorch module (more on this [here](https://pytorch.org/docs/stable/notes/modules.html)); a minimal custom-module sketch is shown after the model definition below.
#
# **Choosing a PINN Solver & Defining the Trainer**
# - Use a Physics-Informed solver from the `pina.solver` module to solve the problem using the specified model. We have already implemented most state-of-the-art solvers for you; [have a look](https://mathlab.github.io/PINA/_rst/_code.html#solvers) if interested. Today we will use the standard `PINN` solver.
#
# **Training**
# - Train the model with the [`Trainer`](https://mathlab.github.io/PINA/_rst/trainer.html) class. The Trainer class provides powerful features to enhance model accuracy, optimize training time and memory, and simplify logging and visualization, thanks to PyTorch Lightning's excellent work; see [our dedicated tutorial](https://mathlab.github.io/PINA/tutorial11/tutorial.html) for further details. By default, training metrics (e.g., MSE error) are logged using a lightning logger (CSVLogger). If you prefer manual tracking, use `pina.callback.MetricTracker`.
#
# Let's cover all steps one by one!
#
# First we build the model, in this case a FeedForward neural network with two hidden layers of size 10 and hyperbolic tangent activation:

# In[7]:


# build the model: two hidden layers of 10 neurons, input x, output u
model = FeedForward(
    layers=[10, 10],
    func=torch.nn.Tanh,
    output_dimensions=len(problem.output_variables),
    input_dimensions=len(problem.input_variables),
)

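# As mentioned above, any plain PyTorch module can be used in place of
# `FeedForward`. A minimal, hypothetical equivalent (the `SimpleMLP` name is
# ours, not part of PINA):


class SimpleMLP(torch.nn.Module):
    def __init__(self, input_dim=1, output_dim=1, hidden=10):
        super().__init__()
        # same architecture as the FeedForward above: two hidden layers + tanh
        self.layers = torch.nn.Sequential(
            torch.nn.Linear(input_dim, hidden),
            torch.nn.Tanh(),
            torch.nn.Linear(hidden, hidden),
            torch.nn.Tanh(),
            torch.nn.Linear(hidden, output_dim),
        )

    def forward(self, x):
        return self.layers(x)
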

# Then we build the solver. The Physics-Informed Neural Network (`PINN`) solver class needs to be initialised with a `model` and the specific `problem` to be solved. It also takes extra arguments, such as the optimizer, scheduler, loss type, and weighting for the different conditions, which are all set to their default values.
#
# >##### 💡***Bonus tip:***
# > All physics solvers in PINA can handle both forward and inverse problems without requiring any changes to the model or solver structure! See [our tutorial](https://mathlab.github.io/PINA/tutorial7/tutorial.html) on inverse problems for more info.

# In[8]:


# create the PINN object with the RAdam optimizer; notice that optimizers need
# to be wrapped with the pina.optim.TorchOptimizer class
pinn = PINN(problem, model, TorchOptimizer(torch.optim.RAdam, lr=0.005))

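# Any optimizer from `torch.optim` can be wrapped in the same way; a
# hypothetical variant using plain Adam with a smaller learning rate would read:
#
# pinn = PINN(problem, model, TorchOptimizer(torch.optim.Adam, lr=0.001))
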

# Finally, we train the model using the Trainer API. The trainer offers various options to customize your training; refer to the official documentation for details. Here, we highlight the `MetricTracker` callback from `pina.callback`, which helps track metrics during training. To train, just call the `.train()` method.
#
# > ##### ⚠️ ***Important Note:***
# > In PINA you can log metrics in different ways. The simplest approach is to use the `MetricTracker` class from `pina.callback`, as we will see today. However, especially when you need to train multiple times to get an average of the loss across runs, we suggest using `lightning.pytorch.loggers` (see [here](https://lightning.ai/docs/pytorch/stable/extensions/logging.html) for reference); a sketch is shown below.
#

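# A minimal sketch of plugging a Lightning logger into the trainer (assuming
# the `tensorboard` package is installed; the "tb_logs" directory name is our
# choice, not a PINA default):
#
# from lightning.pytorch.loggers import TensorBoardLogger
# trainer = Trainer(solver=pinn, logger=TensorBoardLogger("tb_logs"), max_epochs=1500, accelerator="cpu")
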
# In[9]:


# create the trainer
trainer = Trainer(
    solver=pinn,  # The PINN solver to be used for training
    max_epochs=1500,  # Maximum number of training epochs
    logger=True,  # Enables logging (default logger is CSVLogger)
    callbacks=[MetricTracker()],  # Tracks training metrics using MetricTracker
    accelerator="cpu",  # Specifies the computing device ("cpu", "gpu", ...)
    train_size=1.0,  # Fraction of the dataset used for training (100%)
    test_size=0.0,  # Fraction of the dataset used for testing (0%)
    val_size=0.0,  # Fraction of the dataset used for validation (0%)
    enable_model_summary=False,  # Disables model summary printing
)

# train
trainer.train()


# After the training we can inspect the metrics logged by the trainer (by default **PINA** logs the mean square error residual loss). These metrics can also be tracked during training with one of the `Lightning` loggers. The final loss can be accessed via `trainer.logged_metrics`:

# In[10]:


# inspecting final loss
trainer.logged_metrics
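
# For programmatic use, the final value can be read from the logged-metrics
# dictionary (a sketch, assuming the loss is stored under the "train_loss" key,
# the same key used by MetricTracker below):
final_loss = float(trainer.logged_metrics["train_loss"])
print(f"final training loss: {final_loss:.2e}")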


# By using `matplotlib` we can also produce some qualitative plots of the solution.

# In[11]:


# evaluate the network and the analytical solution on a uniform grid
pts = pinn.problem.spatial_domain.sample(256, "grid", variables="x")
predicted_output = pinn.forward(pts).extract("u").tensor.detach()
true_output = pinn.problem.solution(pts).detach()
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(pts.extract(["x"]), predicted_output, label="Neural Network solution")
ax.plot(pts.extract(["x"]), true_output, label="True solution")
_ = plt.legend()
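
# Beyond the visual check, a simple quantitative measure (a sketch reusing the
# tensors computed above) is the relative L2 error of the network solution:
l2_error = torch.linalg.norm(predicted_output - true_output) / torch.linalg.norm(
    true_output
)
print(f"relative L2 error: {l2_error.item():.2e}")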


# The network solution overlaps the analytical one almost perfectly: the two curves are practically indistinguishable. We can also visualize the loss during training using the `MetricTracker`:

# In[12]:


# extract the tracked metrics and plot the training loss on a log scale
trainer_metrics = trainer.callbacks[0].metrics
loss = trainer_metrics["train_loss"]
epochs = range(len(loss))
plt.plot(epochs, loss.cpu())
plt.xlabel("epoch")
plt.ylabel("loss")
plt.yscale("log")


# ## What's Next?
#
# Congratulations on completing the introductory tutorial on Physics-Informed Training! Now that you have a solid foundation, here are several exciting directions you can explore:
#
# 1. **Experiment with Training Duration & Network Architecture**: Try different training durations and tweak the network architecture to optimize performance.
#
# 2. **Explore Other Models in `pina.model`**: Check out other models available in `pina.model` or design your own custom PyTorch module to suit your needs.
#
# 3. **Run Training on a GPU**: Speed up your training by running on a GPU and compare the performance improvements (a sketch follows this list).
#
# 4. **Test Various Solvers**: Explore and evaluate different solvers to assess their performance on various types of problems.
#
# 5. **... and many more!**: The possibilities are vast! Continue experimenting with advanced configurations, solvers, and other features in PINA.
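#
# A sketch of point 3: moving training to a GPU only requires changing the
# `accelerator` argument of the `Trainer` (assuming a CUDA-capable device is
# available):
#
# trainer_gpu = Trainer(solver=pinn, max_epochs=1500, accelerator="gpu")
# trainer_gpu.train()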
#
# For more resources and tutorials, check out the [PINA Documentation](https://mathlab.github.io/PINA/).