|
1 | 1 | { |
2 | | - "cells": [ |
3 | | - { |
4 | | - "cell_type": "markdown", |
5 | | - "metadata": {}, |
6 | | - "source": [ |
7 | | - "## YOLO to Rubik TFlite Conversion" |
8 | | - ] |
| 2 | + "cells": [ |
| 3 | + { |
| 4 | + "cell_type": "markdown", |
| 5 | + "metadata": { |
| 6 | + "id": "1tMAqVl4p58r" |
| 7 | + }, |
| 8 | + "source": [ |
| 9 | + "## YOLO to Rubik TFlite Conversion" |
| 10 | + ] |
| 11 | + }, |
| 12 | + { |
| 13 | + "cell_type": "markdown", |
| 14 | + "metadata": { |
| 15 | + "id": "19sCXrDSp58s" |
| 16 | + }, |
| 17 | + "source": [ |
| 18 | + "#### For Colab users only\n", |
| 19 | + "\n", |
| 20 | + "Google Colab comes with an incompatible version of NumPy installed. To fix this, please run the cells below and **restart your session** when prompted." |
| 21 | + ] |
| 22 | + }, |
| 23 | + { |
| 24 | + "cell_type": "code", |
| 25 | + "execution_count": null, |
| 26 | + "metadata": { |
| 27 | + "colab": { |
| 28 | + "base_uri": "https://localhost:8080/", |
| 29 | + "height": 359 |
| 30 | + }, |
| 31 | + "id": "DJdbSRRjp58s", |
| 32 | + "outputId": "d5e14694-d18d-4ad2-c596-89cc19e2333e" |
| 33 | + }, |
| 34 | + "outputs": [], |
| 35 | + "source": [ |
| 36 | + "%pip uninstall numpy -y\n", |
| 37 | + "%pip install \"numpy>=1.23.0,<2.0.0\"" |
| 38 | + ] |
| 39 | + }, |
| 40 | + { |
| 41 | + "cell_type": "markdown", |
| 42 | + "metadata": { |
| 43 | + "id": "nAbygyUYp58s" |
| 44 | + }, |
| 45 | + "source": [ |
| 46 | + "#### Requirements\n", |
| 47 | + "\n", |
| 48 | + "This notebook can be run on colab.\n", |
| 49 | + "\n", |
| 50 | + "Prior to running the notebook, it is necessary to make an account on [Qualcomm's AI Hub](https://app.aihub.qualcomm.com/account/), and obtain your API key.\n", |
| 51 | + "\n", |
| 52 | + "You should also have a PyTorch model (ending in `.pt`) that's been uploaded to the runtime that you intend to convert.\n", |
| 53 | + "\n", |
| 54 | + "Please note that your API key will be listed in the output, and should therefore be redacted if the output is shared." |
| 55 | + ] |
| 56 | + }, |
| 57 | + { |
| 58 | + "cell_type": "code", |
| 59 | + "execution_count": null, |
| 60 | + "metadata": { |
| 61 | + "colab": { |
| 62 | + "base_uri": "https://localhost:8080/", |
| 63 | + "height": 1000 |
| 64 | + }, |
| 65 | + "id": "aX3JcSFKp58s", |
| 66 | + "outputId": "f2cdadd2-c448-4d8c-c681-c19decef7f3e" |
| 67 | + }, |
| 68 | + "outputs": [], |
| 69 | + "source": [ |
| 70 | + "# This installs the Python package\n", |
| 71 | + "!pip install qai-hub-models[yolov8_det]\n", |
| 72 | + "# sets up AI Hub environment\n", |
| 73 | + "!qai-hub configure --api_token <YOUR_API_TOKEN>\n", |
| 74 | + "# Converts the model to be run on RB3Gen2\n", |
| 75 | + "!python -m qai_hub_models.models.yolov8_det.export --quantize w8a8 --device=\"RB3 Gen 2 (Proxy)\" --ckpt-name /PATH/TO/WEIGHTS --device-os linux --target-runtime tflite\n" |
| 76 | + ] |
| 77 | + }, |
| 78 | + { |
| 79 | + "cell_type": "markdown", |
| 80 | + "metadata": { |
| 81 | + "id": "0I2cXQO4p58s" |
| 82 | + }, |
| 83 | + "source": [ |
| 84 | + "Modified from https://github.com/ramalamadingdong/yolo-rb3gen2-trainer/blob/main/AI_Hub_Quanitization_RB3Gen2.ipynb" |
| 85 | + ] |
| 86 | + } |
| 87 | + ], |
| 88 | + "metadata": { |
| 89 | + "colab": { |
| 90 | + "provenance": [] |
| 91 | + }, |
| 92 | + "kernelspec": { |
| 93 | + "display_name": "Python 3", |
| 94 | + "language": "python", |
| 95 | + "name": "python3" |
| 96 | + }, |
| 97 | + "language_info": { |
| 98 | + "name": "python", |
| 99 | + "version": "3.11.7" |
| 100 | + } |
9 | 101 | }, |
10 | | - { |
11 | | - "cell_type": "markdown", |
12 | | - "metadata": {}, |
13 | | - "source": [ |
14 | | - "#### For Colab users only\n", |
15 | | - "\n", |
16 | | - "Google Colab comes with an incompatible version of Numpy installed. To fix this, please run the following cells below and **restart your session** when prompted." |
17 | | - ] |
18 | | - }, |
19 | | - { |
20 | | - "cell_type": "code", |
21 | | - "execution_count": null, |
22 | | - "metadata": {}, |
23 | | - "outputs": [], |
24 | | - "source": [ |
25 | | - "%pip uninstall numpy -y\n", |
26 | | - "%pip install numpy>=1.23.0,<2.0.0" |
27 | | - ] |
28 | | - }, |
29 | | - { |
30 | | - "cell_type": "markdown", |
31 | | - "metadata": {}, |
32 | | - "source": [ |
33 | | - "#### Requirements\n", |
34 | | - "\n", |
35 | | - "This notebook can be run on colab.\n", |
36 | | - "\n", |
37 | | - "Prior to running the notebook, it is necessary to make an account on [Qualcomm's AI Hub](https://app.aihub.qualcomm.com/account/), and obtain your API key.\n", |
38 | | - "\n", |
39 | | - "You should also have a PyTorch model (ending in `.pt`) that's been uploaded to the runtime that you intend to convert.\n", |
40 | | - "\n", |
41 | | - "Please note that your API key will be listed in the output, and should therefore be redacted if the output is shared." |
42 | | - ] |
43 | | - }, |
44 | | - { |
45 | | - "cell_type": "code", |
46 | | - "execution_count": null, |
47 | | - "metadata": {}, |
48 | | - "outputs": [], |
49 | | - "source": [ |
50 | | - "# This installs Python package\n", |
51 | | - "!pip install qai-hub-models[yolov8-det-quantized]\n", |
52 | | - "# sets up AI Hub enviroment \n", |
53 | | - "!qai-hub configure --api_token <YOUR_API_TOKEN>\n", |
54 | | - "# Converts the model to be ran on RB3Gen2\n", |
55 | | - "!python -m qai_hub_models.models.yolov8_det_quantized.export --device=\"RB3 Gen 2 (Proxy)\" --ckpt-name /YOUR/WEIGHTS/FILE\n" |
56 | | - ] |
57 | | - }, |
58 | | - { |
59 | | - "cell_type": "markdown", |
60 | | - "metadata": {}, |
61 | | - "source": [ |
62 | | - "Modified from https://github.com/ramalamadingdong/yolo-rb3gen2-trainer/blob/main/AI_Hub_Quanitization_RB3Gen2.ipynb" |
63 | | - ] |
64 | | - } |
65 | | - ], |
66 | | - "metadata": { |
67 | | - "kernelspec": { |
68 | | - "display_name": "Python 3", |
69 | | - "language": "python", |
70 | | - "name": "python3" |
71 | | - }, |
72 | | - "language_info": { |
73 | | - "name": "python", |
74 | | - "version": "3.11.7" |
75 | | - } |
76 | | - }, |
77 | | - "nbformat": 4, |
78 | | - "nbformat_minor": 2 |
| 102 | + "nbformat": 4, |
| 103 | + "nbformat_minor": 0 |
79 | 104 | } |
0 commit comments