diff --git a/.gitmodules b/.gitmodules index 4ddfe666..f3fbd8b3 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,7 +1,3 @@ [submodule "sdk"] path = sdk url = https://github.com/MaximIntegratedAI/MAX78000_SDK.git -[submodule "distiller"] - path = distiller - url = https://github.com/MaximIntegratedAI/distiller.git - branch = pytorch-1.5 diff --git a/README.md b/README.md index dfdbb349..16ecaa08 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # MAX78000 Model Training and Synthesis -_April 20, 2021_ +_April 22, 2021_ The Maxim Integrated AI project is comprised of four repositories: @@ -90,23 +90,63 @@ The following software is optional, and can be replaced with other similar softw ### Project Installation -*The software in this project uses Python 3.8.9 or a later 3.8.x version.* +#### System Packages -It is not necessary to install Python 3.8.9 system-wide, or to rely on the system-provided Python. To manage Python versions, use `pyenv` (https://github.com/pyenv/pyenv). +Some additional system packages are required, and installation of these additional packages requires administrator privileges. Note that this is the only time administrator privileges are required unless the optional Manifold is installed locally. -On macOS (no CUDA support available): +##### macOS + +On macOS (no CUDA support available) use: ```shell -$ brew install pyenv pyenv-virtualenv libomp libsndfile tcl-tk +$ brew install libomp libsndfile tcl-tk ``` -On Linux: +##### Linux (Ubuntu) ```shell $ sudo apt-get install -y make build-essential libssl-dev zlib1g-dev \ libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \ libncurses5-dev libncursesw5-dev xz-utils tk-dev libffi-dev liblzma-dev \ libsndfile-dev portaudio19-dev +``` + +###### RedHat Enterprise Linux / CentOS 8 + +While Ubuntu 20.04 LTS is the supported distribution, the MAX78000 software packages run fine on all modern Linux distributions that also support CUDA. 
The *apt-get install* commands above must be replaced with distribution specific commands and package names. Unfortunately, there is no obvious 1:1 mapping between package names from one distribution to the next. The following example shows the commands needed for RHEL/CentOS 8. + +*Two of the required packages are not in the base repositories. Enable the EPEL and PowerTools repositories:* + +```shell +$ sudo dnf install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm +$ sudo dnf config-manager --set-enabled powertools +``` + +*Proceed to install the required packages:* + +```shell +$ sudo dnf group install "Development Tools" +$ sudo dnf install openssl-devel zlib-devel \ + bzip2-devel readline-devel sqlite-devel wget llvm \ + xz-devel tk tk-devel libffi-devel \ + libsndfile libsndfile-devel portaudio-devel +``` + +#### Python 3.8 + +*The software in this project uses Python 3.8.9 or a later 3.8.x version.* + +It is not necessary to install Python 3.8.9 system-wide, or to rely on the system-provided Python. To manage Python versions, use `pyenv` (https://github.com/pyenv/pyenv). + +On macOS (no CUDA support available): + +```shell +$ brew install pyenv pyenv-virtualenv +``` + +On Linux: + +```shell +$ curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash # NOTE: Verify contents of the script before running it!! +``` @@ -161,9 +201,9 @@ Add this line to `~/.profile` (and on macOS, to `~/.zprofile`). #### Nervana Distiller -Nirvana Distiller is package for neural network compression and quantization. Network compression can reduce the memory footprint of a neural network, increase its inference speed and save energy. Distiller is automatically installed with the other packages. +Nervana Distiller is a package for neural network compression and quantization. Network compression can reduce the memory footprint of a neural network, increase its inference speed and save energy. 
Distiller is automatically installed as a git sub-module with the other packages. -#### Uber Manifold +#### Uber Manifold (Optional) Manifold is a model-agnostic visual debugging tool for machine learning. Manifold can compare models, detects which subset of data a model is inaccurately predicting, and explains the potential cause of poor model performance by surfacing the feature distribution difference between better and worse-performing subsets of data. diff --git a/README.pdf b/README.pdf index df568ec8..693740f1 100644 Binary files a/README.pdf and b/README.pdf differ diff --git a/distiller b/distiller deleted file mode 160000 index 26b8d727..00000000 --- a/distiller +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 26b8d727083cd821812dd74502d34479596a598d diff --git a/izer/quantize.py b/izer/quantize.py index b4066ac1..1707b2dd 100644 --- a/izer/quantize.py +++ b/izer/quantize.py @@ -12,8 +12,6 @@ import torch -from distiller.apputils.checkpoint import get_contents_table # pylint: disable=no-name-in-module - from . import tornadocnn as tc from . import yamlcfg from .devices import device @@ -46,9 +44,6 @@ def convert_checkpoint(input_file, output_file, arguments): print("Converting checkpoint file", input_file, "to", output_file) checkpoint = torch.load(input_file, map_location='cpu') - if arguments.verbose: - print(get_contents_table(checkpoint)) - if 'state_dict' not in checkpoint: eprint("No `state_dict` in checkpoint file.") diff --git a/requirements.txt b/requirements.txt index 20636a1c..3b9ddf8d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,6 @@ six>=1.12.0 scipy>=1.3.0 torch==1.8.1 pytest~=4.6.4 -onnx>=1.7.0 +onnx>=1.9.0 tensorboard==2.4.1 colorama>=0.4.4 --e file:distiller