From 2bbf5937af2b811b19467727c739b96f6525d825 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20R=C3=A4dle?= Date: Sun, 11 Dec 2022 17:43:21 -0800 Subject: [PATCH 1/2] Install PyTorch Tutorial Basic tutorial for how to install the Python PyTorch dependency --- .../docs/tutorials/models/install-pytorch.mdx | 76 +++++++++++++++++++ website/fb/sdoc-cache.json | 4 + website/sidebars.js | 10 ++- 3 files changed, 89 insertions(+), 1 deletion(-) create mode 100644 website/docs/tutorials/models/install-pytorch.mdx create mode 100644 website/fb/sdoc-cache.json diff --git a/website/docs/tutorials/models/install-pytorch.mdx b/website/docs/tutorials/models/install-pytorch.mdx new file mode 100644 index 000000000..b5d94937d --- /dev/null +++ b/website/docs/tutorials/models/install-pytorch.mdx @@ -0,0 +1,76 @@ +--- +id: install-pytorch +sidebar_position: 1 +title: Install PyTorch +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +In this tutorial, we will learn a quick way for how to set up PyTorch. + +# [OPTIONAL] Set up Python virtual environment + +It is recommended to run the Python scripts in a virtual environment. Python offers a command to create a virtual environment with the following command. + +```shell +python3 -m venv venv +source venv/bin/activate +``` + +# Install `torch` dependency + +Last, let's install the PyTorch dependencies via the Python package manager. + + + + + ```shell + pip install torch==1.12.1 + ``` + + + + + ```shell + pip install torch==1.12.1+cpu + ``` + + + + + ```shell + pip install torch==1.12.1+cpu + ``` + + + + +# Test Installation + +Open Python interpreter in terminal + +```python +python +``` + +Then execute the two lines of code, which will print the PyTorch version + +```python +import torch +print(torch.__version__) +``` + +```python title="Output" +1.12.1 +``` + +Exit the Python interpreter with `exit()`. + +That's it! 
PyTorch is installed successfully \ No newline at end of file diff --git a/website/fb/sdoc-cache.json b/website/fb/sdoc-cache.json new file mode 100644 index 000000000..e172e37eb --- /dev/null +++ b/website/fb/sdoc-cache.json @@ -0,0 +1,4 @@ +{ + "snippets": {}, + "description": "@generated" +} \ No newline at end of file diff --git a/website/sidebars.js b/website/sidebars.js index 1f4e80c1d..a4fc98d79 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -35,7 +35,15 @@ module.exports = { }, { type: 'category', - label: 'Tutorials', + label: 'Model Tutorials', + collapsed: false, + items: [ + 'tutorials/models/install-pytorch', + ], + }, + { + type: 'category', + label: 'Demo Tutorials', collapsed: false, items: [ 'tutorials/snacks/image-classification', From c9ce235aaefdf0a310f05b1ad6728f1b5b19cbf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20R=C3=A4dle?= Date: Sun, 11 Dec 2022 17:46:44 -0800 Subject: [PATCH 2/2] Hello World Model Tutorial Basic "Hello World" model tutorial --- .../tutorials/models/hello-world-model.mdx | 163 ++++++++++++++++++ website/sidebars.js | 1 + 2 files changed, 164 insertions(+) create mode 100644 website/docs/tutorials/models/hello-world-model.mdx diff --git a/website/docs/tutorials/models/hello-world-model.mdx b/website/docs/tutorials/models/hello-world-model.mdx new file mode 100644 index 000000000..8620d6a12 --- /dev/null +++ b/website/docs/tutorials/models/hello-world-model.mdx @@ -0,0 +1,163 @@ +--- +id: hello-world-model +sidebar_position: 2 +title: Hello World Model +--- + +In this tutorial, you will create a "Hello World" model. The model will take a +string as input and return a string as output. You will also learn how to export +a model as TorchScript model that can be loaded with the PlayTorch SDK for +on-device inference. + +# Create PyTorch Model + +Let's begin by creating a PyTorch model. 
Here, we are going to create a simple +"Hello World" model using `torch.nn.Module` to represent a neural network (hence +the namespace `nn`). + +The model defines a `forward` function with one argument `name`. The function +"performs" the computation, e.g., in later tutorials, it will perform inference +on an image. + +The model constructor has one argument `prefix`, which will be used in the +`forward` function to prefix the `name` argument. + +More details on PyTorch modules at https://pytorch.org/docs/stable/notes/modules.html + +```python +import torch +from torch import nn + +class Model(nn.Module): + def __init__(self, prefix: str): + super().__init__() + self.prefix = prefix + + def forward(self, name: str) -> str: + return f"{self.prefix} {name}!" +``` + +## Create an instance of the model + +Next, let's create an instance of the model and perform a computation. + +```python +model = Model("Hello") +model("Roman") +``` + +```python title="Output" +Hello Roman! +``` + +# Export Model for Mobile + +Now that we have a model, let's export the model to use on mobile. To do that, +we need to script the model (i.e., create a +[TorchScript](https://pytorch.org/docs/stable/jit.html) representation) as follows: + +```python +scripted_model = torch.jit.script(model) +scripted_model("Lindsay") +``` + +```python title="Output" +Hello Lindsay! +``` + +:::note +The `torch.jit.script` is the recommended way to create a `TorchScript` model +because it can capture control flow, +but it might fail in some cases. If that happens, we recommend consulting the PyTorch +[TorchScript](https://pytorch.org/docs/stable/jit.html) documentation for solutions. +::: + +PyTorch offers the `optimize_for_mobile` utility function to run a list of +optimizations on the model (e.g., Conv2D + BatchNorm fusion, dropout removal). +It's recommended to optimize the model with this utility before exporting it for +mobile. 
+ +More details on the `optimize_for_mobile` utility at: https://pytorch.org/docs/stable/mobile_optimizer.html + +```python +from torch.utils.mobile_optimizer import optimize_for_mobile + +optimized_model = optimize_for_mobile(scripted_model) +optimized_model("Kodo") +``` + +```python title="Output" +Hello Kodo! +``` + +Great! Now, let's export the model for mobile. This is done by saving the model +for the lite interpreter. The `_save_for_lite_interpreter` function will create +a `hello_world.ptl` file, which we will be able to load with the PlayTorch SDK. + +```python +optimized_model._save_for_lite_interpreter("hello_world.ptl") +``` + +More details on the lite interpreter at: +https://pytorch.org/tutorials/prototype/lite_interpreter.html + +# Create Mobile UI and Load Model on Mobile + +Next, let's create a PlayTorch Snack by following the link +http://snack.playtorch.dev/. Then, drag and drop the `hello_world.ptl` file onto +the just created PlayTorch Snack--this will import the model into the Snack. + +Replace the source code in the `App.js` with the React Native source code below. +The source code below will create a user interface with a text input, a button, +and a text element. When pressing the button, it will load the `hello_world.ptl` +model and call the model forward function with the text input value as argument. +The returned model output will then be displayed below the button. 
+ +```jsx +import * as React from 'react'; +import { useState } from 'react'; +import { + Button, + SafeAreaView, + StyleSheet, + Text, + TextInput, + View, +} from 'react-native'; +import { torch, MobileModel } from 'react-native-pytorch-core'; + +export default function App() { + const [modelInput, setModelInput] = useState(''); + const [modelOutput, setModelOutput] = useState(''); + + async function handleModelInput() { + const filePath = await MobileModel.download(require('./hello_world.ptl')); + const model = await torch.jit._loadForMobile(filePath); + const output = await model.forward(modelInput); + setModelOutput(output); + } + + return ( + + + +