{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "# For tips on running notebooks in Google Colab, see\n# https://codelin.vip/beginner/colab\n%matplotlib inline" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "PyTorch: Tensors and autograd\n=============================\n\nA third order polynomial, trained to predict $y=\\sin(x)$ from $-\\pi$ to\n$\\pi$ by minimizing squared Euclidean distance.\n\nThis implementation computes the forward pass using operations on\nPyTorch Tensors, and uses PyTorch autograd to compute gradients.\n\nA PyTorch Tensor represents a node in a computational graph. If `x` is a\nTensor that has `x.requires_grad=True` then `x.grad` is another Tensor\nholding the gradient of `x` with respect to some scalar value.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": false }, "outputs": [], "source": [ "import torch\nimport math\n\n# We want to be able to train our model on an `accelerator `__\n# such as CUDA, MPS, MTIA, or XPU. If the current accelerator is available, we will use it. Otherwise, we use the CPU.\n\ndtype = torch.float\ndevice = torch.accelerator.current_accelerator().type if torch.accelerator.is_available() else \"cpu\"\nprint(f\"Using {device} device\")\ntorch.set_default_device(device)\n\n# Create Tensors to hold input and outputs.\n# By default, requires_grad=False, which indicates that we do not need to\n# compute gradients with respect to these Tensors during the backward pass.\nx = torch.linspace(-math.pi, math.pi, 2000, dtype=dtype)\ny = torch.sin(x)\n\n# Create random Tensors for weights. For a third order polynomial, we need\n# 4 weights: y = a + b x + c x^2 + d x^3\n# Setting requires_grad=True indicates that we want to compute gradients with\n# respect to these Tensors during the backward pass.\na = torch.randn((), dtype=dtype, requires_grad=True)\nb = torch.randn((), dtype=dtype, requires_grad=True)\nc = torch.randn((), dtype=dtype, requires_grad=True)\nd = torch.randn((), dtype=dtype, requires_grad=True)\n\nlearning_rate = 1e-6\nfor t in range(2000):\n # Forward pass: compute predicted y using operations on Tensors.\n y_pred = a + b * x + c * x ** 2 + d * x ** 3\n\n # Compute and print loss using operations on Tensors.\n # Now loss is a Tensor of shape (1,)\n # loss.item() gets the scalar value held in the loss.\n loss = (y_pred - y).pow(2).sum()\n if t % 100 == 99:\n print(t, loss.item())\n\n # Use autograd to compute the backward pass. This call will compute the\n # gradient of loss with respect to all Tensors with requires_grad=True.\n # After this call a.grad, b.grad. c.grad and d.grad will be Tensors holding\n # the gradient of the loss with respect to a, b, c, d respectively.\n loss.backward()\n\n # Manually update weights using gradient descent. Wrap in torch.no_grad()\n # because weights have requires_grad=True, but we don't need to track this\n # in autograd.\n with torch.no_grad():\n a -= learning_rate * a.grad\n b -= learning_rate * b.grad\n c -= learning_rate * c.grad\n d -= learning_rate * d.grad\n\n # Manually zero the gradients after updating weights\n a.grad = None\n b.grad = None\n c.grad = None\n d.grad = None\n\nprint(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 0 }