diff --git a/.github/workflows/jetson-pypi-health.yml b/.github/workflows/jetson-pypi-health.yml
index 998132b1b..8484d3ce7 100644
--- a/.github/workflows/jetson-pypi-health.yml
+++ b/.github/workflows/jetson-pypi-health.yml
@@ -2,8 +2,6 @@
name: Check PyPI Health
on:
- schedule:
- - cron: '*/5 * * * *'
workflow_dispatch:
jobs:
diff --git a/packages/build/python/Dockerfile b/packages/build/python/Dockerfile
index b7873d1f0..5bd86cba7 100644
--- a/packages/build/python/Dockerfile
+++ b/packages/build/python/Dockerfile
@@ -2,7 +2,7 @@
# name: python
# group: build
# depends: [build-essential, pip_cache]
-# notes: installs core `python3` packages and `pip`
+# notes: installs core `python`, `uv` and `pip` in a conda environment
#---
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
@@ -24,7 +24,11 @@ ENV PYTHON_VERSION=${PYTHON_VERSION} \
PIP_VERBOSE=1 \
TWINE_NON_INTERACTIVE=1 \
DEBIAN_FRONTEND=noninteractive \
- PATH=/opt/venv/bin:$PATH
+ PATH=/opt/conda/bin:$PATH \
+ MAMBA_ROOT_PREFIX=/opt/conda \
+ MAMBA_EXE=/usr/local/bin/micromamba \
+ CONDA_PREFIX=/opt/conda \
+ UV_COMPILE_BYTECODE=1
#PYTHONPATH=/opt/venv/lib/python${PYTHON_VERSION_ARG}/site-packages:/usr/lib/python3/dist-packages:$PYTHONPATH \
diff --git a/packages/build/python/config.py b/packages/build/python/config.py
index c7d80f2c1..8d6358f8c 100644
--- a/packages/build/python/config.py
+++ b/packages/build/python/config.py
@@ -13,7 +13,7 @@ def python(version, requires=None) -> list:
if requires:
pkg['requires'] = requires
-
+
return pkg
package = [
diff --git a/packages/build/python/install.sh b/packages/build/python/install.sh
index 94006e070..b793066e0 100755
--- a/packages/build/python/install.sh
+++ b/packages/build/python/install.sh
@@ -1,71 +1,59 @@
#!/usr/bin/env bash
-# Python installer
+# Python installer using Micromamba
set -x
-apt-get update
-apt-get install -y --no-install-recommends \
- python${PYTHON_VERSION} \
- python${PYTHON_VERSION}-dev
-
-which python${PYTHON_VERSION}
-return_code=$?
-set -e
-
-if [ $return_code != 0 ]; then
- echo "-- using deadsnakes ppa to install Python ${PYTHON_VERSION}"
- add-apt-repository ppa:deadsnakes/ppa
- apt-get update
- apt-get install -y --no-install-recommends \
- python${PYTHON_VERSION} \
- python${PYTHON_VERSION}-dev
-fi
-
-# path 1: Python 3.8-3.10 for JP5/6
-# path 2: Python 3.6 for JP4
-# path 3: Python 3.12 for 24.04
-distro=$(lsb_release -rs)
-
-if [ $distro = "24.04" ]; then
- apt-get install -y --no-install-recommends python3-venv
- python3 -m venv --system-site-packages /opt/venv
- source /opt/venv/bin/activate
- curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}
-elif [ $distro = "20.04" ]; then
- curl -sS https://bootstrap.pypa.io/pip/3.8/get-pip.py | python3.8
-elif [ $distro = "18.04" ]; then
- curl -sS https://bootstrap.pypa.io/pip/3.6/get-pip.py | python3.6
-else
- curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION}
+curl -Ls https://micro.mamba.pm/api/micromamba/linux-$(uname -m | sed 's/x86_64/64/')/latest | tar -xvj bin/micromamba
+mv bin/micromamba /usr/local/bin/
+
+export MAMBA_ROOT_PREFIX=/opt/conda
+export CONDA_PREFIX=/opt/conda
+export PATH=/opt/conda/bin:$PATH
+mkdir -p $MAMBA_ROOT_PREFIX
+eval "$(micromamba shell hook -s posix)"
+micromamba shell init -s bash -r $MAMBA_ROOT_PREFIX
+
+# Install Python and core packages in base environment
+micromamba install -y -n base \
+ python=${PYTHON_VERSION} \
+ setuptools \
+ packaging \
+ "cython<3" \
+ wheel \
+ pip \
+ uv \
+ twine \
+ psutil \
+ pkginfo
+
+# Add micromamba initialization to /etc/profile.d for all shells
+cat > /etc/profile.d/mamba.sh << 'EOF'
+export MAMBA_EXE=/usr/local/bin/micromamba
+export MAMBA_ROOT_PREFIX=/opt/conda
+export PATH=/opt/conda/bin:$PATH
+
+if [ -f "${MAMBA_EXE}" ]; then
+ __mamba_setup="$("$MAMBA_EXE" shell hook --shell bash --root-prefix "$MAMBA_ROOT_PREFIX" 2> /dev/null)"
+ if [ $? -eq 0 ]; then
+ eval "$__mamba_setup"
+ else
+ alias micromamba="$MAMBA_EXE"
+ fi
+ unset __mamba_setup
fi
-rm -rf /var/lib/apt/lists/*
-apt-get clean
+micromamba activate
+EOF
-ln -f -s /usr/bin/python${PYTHON_VERSION} /usr/local/bin/python3
-#ln -s /usr/bin/pip${PYTHON_VERSION} /usr/local/bin/pip3
+chmod +x /etc/profile.d/mamba.sh
+source /etc/profile.d/mamba.sh
-# This was causing issues downstream (e.g. Python2.7 still around in Ubuntu 18.04,
-# and in cmake python enumeration where some packages expect that 'python' is 2.7)
-# Another way is apt package 'python-is-python3' - symlinks /usr/bin/python to python
-#RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1 && \ \
-# update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1 \
+ln -sf /opt/conda/bin/python /usr/local/bin/python3
+ln -sf /opt/conda/bin/python /usr/local/bin/python
+ln -sf /opt/conda/bin/pip /usr/local/bin/pip3
+ln -sf /opt/conda/bin/pip /usr/local/bin/pip
+# Verify installation
which python3
python3 --version
-
which pip3
pip3 --version
-
-python3 -m pip install --upgrade pip pkginfo --index-url https://pypi.org/simple
-
-pip3 install --no-binary :all: psutil
-pip3 install --upgrade \
- setuptools \
- packaging \
- 'Cython' \
- wheel \
- uv
-
-pip3 install --upgrade --index-url https://pypi.org/simple \
- twine
-
diff --git a/packages/ml/onnxruntime/build.sh b/packages/ml/onnxruntime/build.sh
index d018c21a5..c1d2c7ab6 100755
--- a/packages/ml/onnxruntime/build.sh
+++ b/packages/ml/onnxruntime/build.sh
@@ -10,7 +10,7 @@ if [ ! -d "/usr/lib/$(uname -m)-linux-gnu" ]; then
fi
# Ensure TensorRT libraries are in LD_LIBRARY_PATH
-export LD_LIBRARY_PATH=/usr/lib/$(uname -m)-linux-gnu:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=/usr/lib/$(uname -m)-linux-gnu:/usr/local/cuda/compat:/usr/local/cuda/lib64:/usr/lib/$(uname -m)-linux-gnu/nvidia/:$LD_LIBRARY_PATH
# Verify TensorRT components
if [ ! -f "/usr/lib/$(uname -m)-linux-gnu/libnvinfer.so" ]; then
@@ -20,7 +20,7 @@ fi
if [ ! -f "/usr/lib/$(uname -m)-linux-gnu/libnvdla_compiler.so" ]; then
echo "TensorRT NVDLA compiler library not found"
- exit 1
+ echo "WARNING: libnvdla_compiler.so missing - continuing anyway"
fi
pip3 uninstall -y onnxruntime || echo "onnxruntime was not previously installed"
diff --git a/packages/smart-home/homeassistant-base/config.py b/packages/smart-home/homeassistant-base/config.py
index 7965cb5f5..c678ab912 100644
--- a/packages/smart-home/homeassistant-base/config.py
+++ b/packages/smart-home/homeassistant-base/config.py
@@ -193,8 +193,12 @@ def latest_deps_versions(branch_name: str) -> Tuple[Optional[str], Optional[str]
def create_package(version, default=False) -> list:
pkg = package.copy()
- wanted_version = github_latest_tag('home-assistant/docker-base') if version == 'latest' else version
- bashio_version, tempio_version, s6_overlay_version = latest_deps_versions(wanted_version)
+ try:
+ wanted_version = github_latest_tag('home-assistant/docker-base') if version == 'latest' else version
+ bashio_version, tempio_version, s6_overlay_version = latest_deps_versions(wanted_version)
+ except Exception as e:
+ print(f"Failed to fetch the latest version of dependencies: {e}")
+ bashio_version, tempio_version, s6_overlay_version = None, None, None
if not all([bashio_version, tempio_version, s6_overlay_version]):
log_warning("Failed to get dependency versions, using defaults")
diff --git a/packages/speech/orpheus/Dockerfile b/packages/speech/orpheus/Dockerfile
new file mode 100644
index 000000000..1f1365316
--- /dev/null
+++ b/packages/speech/orpheus/Dockerfile
@@ -0,0 +1,56 @@
+#---
+# name: orpheus
+# group: audio
+# depends: [torchaudio, numpy, tensorrt, onnxruntime, ffmpeg]
+# requires: '>=34.1.0'
+#---
+ARG BASE_IMAGE
+FROM ${BASE_IMAGE}
+
+# Set non-interactive frontend
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install system dependencies
+RUN apt-get update && \
+ apt-get install -y \
+ portaudio19-dev \
+ && apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+# Create app directory
+RUN mkdir -p /app/outputs && \
+ chmod 777 /app/outputs
+
+# Clone and install Orpheus-FastAPI
+RUN git clone https://github.com/Lex-au/Orpheus-FastAPI /opt/orpheus && \
+ cd /opt/orpheus && \
+ # Update requirements to use specific versions that work with Jetson
+ sed -i 's/numpy==1.24.0/numpy>=1.24.0/g' requirements.txt && \
+ echo "👁️######## Orpheus-FastAPI - requirements.txt ########👁️" && \
+ cat requirements.txt && \
+ echo "👁️^^^^^^^^ Orpheus-FastAPI - requirements.txt ^^^^^^^^👁️" && \
+ pip3 install -r requirements.txt
+
+# Set up environment variables
+ENV PYTHONUNBUFFERED=1 \
+ PYTHONPATH=/opt/orpheus \
+ USE_GPU=true
+
+# Verify installations
+RUN python3 -c "import torch; print('PyTorch version:', torch.__version__); \
+ print('CUDA available:', torch.cuda.is_available()); \
+ print('CUDA device count:', torch.cuda.device_count() if torch.cuda.is_available() else 'N/A')"
+
+RUN python3 -c "from snac import SNAC; print('SNAC import successful')"
+
+
+# Set default port
+ARG PORT=5005
+ENV PORT=${PORT}
+EXPOSE ${PORT}
+
+# Set working directory
+WORKDIR /opt/orpheus
+
+# Set the entrypoint
+CMD python3 -m uvicorn app:app --host 0.0.0.0 --port ${PORT} --workers 1
diff --git a/packages/speech/speaches/Dockerfile b/packages/speech/speaches/Dockerfile
index 515b447e2..40338658d 100644
--- a/packages/speech/speaches/Dockerfile
+++ b/packages/speech/speaches/Dockerfile
@@ -1,35 +1,29 @@
#---
# name: speaches
# group: audio
-# depends: [faster-whisper, piper-tts, kokoro-tts:onnx, python:3.12, nodejs]
+# depends: [faster-whisper, piper-tts, kokoro-tts:onnx, python:3.12, ffmpeg]
# requires: '>=34.1.0'
# docs: docs.md
#---
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
-# RUN apt-get update && \
-# apt-get install -y libsndfile1-dev && \
-# apt-get clean && \
-# rm -rf /var/lib/apt/lists/*
-
-# RUN python3 -c "import onnxruntime; print('☑️✅ ONNX Runtime version:', onnxruntime.__version__); print('✅ EPs:', onnxruntime.get_available_providers())"
+RUN python3 -c "import onnxruntime; print('☑️✅ ONNX Runtime version:', onnxruntime.__version__); print('✅ EPs:', onnxruntime.get_available_providers())"
# 1. Clone and install speaches
RUN set -ex && \
- git clone https://github.com/speaches-ai/speaches /opt/speaches && \
+ git clone -b renovate/jetson-arm64 https://github.com/JonnyTran/speaches /opt/speaches && \
cd /opt/speaches && \
sed -i 's|requires-python.*|requires-python = ">=3.10"|g' pyproject.toml && \
sed -i 's|"faster-whisper.*",|"faster-whisper",|g' pyproject.toml && \
sed -i 's|"ctranslate2.*",|"ctranslate2",|g' pyproject.toml && \
sed -i 's|"kokoro-onnx.*",|"kokoro-onnx",|g' pyproject.toml && \
sed -i 's|"numpy.*",|"numpy",|g' pyproject.toml && \
- echo "👁️######## speaches - pyproject.toml ########👁️" && \
+ echo "######### speaches - pyproject.toml #########" && \
cat pyproject.toml && \
- echo "👁️^^^^^^^^ speaches - pyproject.toml ^^^^^^^^👁️" && \
- pip3 install '.[ui]' && \
- cp /opt/speaches/model_aliases.json / && \
- pip3 uninstall -y onnxruntime onnxruntime-gpu || true
+ echo "^^^^^^^^^ speaches - pyproject.toml ^^^^^^^^^" && \
+ uv pip install '.[ui]' && \
+ cp /opt/speaches/model_aliases.json / || true
# Patch to skip UI mount if dist/ missing
COPY 0001-Conditionally-mount-realtime-console-dist.patch /tmp/
@@ -46,11 +40,12 @@ ENV PYTHONPATH="/opt/speaches/src:${PYTHONPATH}"
RUN python3 -c "import speaches; import openai; import aiostream; print('✅ all modules import OK')"
# 2. Extract GPU-enabled build
-RUN wget https://apt.jetson-ai-lab.dev/jp6/cu128/24.04/onnxruntime-gpu-1.22.tar.gz && \
- tar -xzvf onnxruntime-gpu-1.22.tar.gz -C /usr/local
+# RUN wget https://apt.jetson-ai-lab.dev/jp6/cu128/24.04/onnxruntime-gpu-1.22.tar.gz && \
+# tar -xzvf onnxruntime-gpu-1.22.tar.gz -C /usr/local
# 3. Install ONNX Runtime GPU wheel if needed
-RUN pip3 install onnxruntime-gpu==1.22
+# RUN pip3 list
+# RUN pip3 install onnxruntime-gpu==1.22
# 4. Confirm ONNX Runtime is correct
RUN python3 -c "import onnxruntime; print('☑️✅ ONNX Runtime version:', onnxruntime.__version__); print('✅ EPs:', onnxruntime.get_available_providers())"
diff --git a/packages/speech/speaches/README.md b/packages/speech/speaches/README.md
new file mode 100644
index 000000000..f3d8c2fba
--- /dev/null
+++ b/packages/speech/speaches/README.md
@@ -0,0 +1,70 @@
+# speaches
+
+> [`CONTAINERS`](#user-content-containers) [`IMAGES`](#user-content-images) [`RUN`](#user-content-run) [`BUILD`](#user-content-build)
+
+docs.md
+
+CONTAINERS
+
+
+| **`speaches`** | |
+| :-- | :-- |
+| Builds | [](https://github.com/dusty-nv/jetson-containers/actions/workflows/speaches_jp60.yml) [](https://github.com/dusty-nv/jetson-containers/actions/workflows/speaches_jp51.yml) |
+| Requires | `L4T ['>=34.1.0']` |
+| Dependencies | [`build-essential`](/packages/build/build-essential) [`cuda`](/packages/cuda/cuda) [`cudnn`](/packages/cuda/cudnn) [`python`](/packages/build/python) [`numpy`](/packages/numpy) [`cmake`](/packages/build/cmake/cmake_pip) [`ffmpeg`](/packages/multimedia/ffmpeg) [`onnx`](/packages/onnx) [`ctranslate2`](/packages/ctranslate2) [`huggingface_hub`](/packages/llm/huggingface_hub) [`faster-whisper`](/packages/audio/faster-whisper) |
+| Dependants | |
+| Dockerfile | [`Dockerfile`](Dockerfile) |
+| Images | |
+
+
+
+
+CONTAINER IMAGES
+
+
+| Repository/Tag | Date | Arch | Size |
+| :-- | :--: | :--: | :--: |
+
+> Container images are compatible with other minor versions of JetPack/L4T:
+> • L4T R32.7 containers can run on other versions of L4T R32.7 (JetPack 4.6+)
+> • L4T R35.x containers can run on other versions of L4T R35.x (JetPack 5.1+)
+
+
+
+RUN CONTAINER
+
+
+To start the container, you can use [`jetson-containers run`](/docs/run.md) and [`autotag`](/docs/run.md#autotag), or manually put together a [`docker run`](https://docs.docker.com/engine/reference/commandline/run/) command:
+```bash
+# automatically pull or build a compatible container image
+jetson-containers run $(autotag speaches)
+
+# or explicitly specify one of the container images above
+jetson-containers run dustynv/speaches:r35.3.1
+
+# or if using 'docker run' (specify image and mounts/ect)
+sudo docker run --runtime nvidia -it --rm --network=host dustynv/speaches:r35.3.1
+```
+> [`jetson-containers run`](/docs/run.md) forwards arguments to [`docker run`](https://docs.docker.com/engine/reference/commandline/run/) with some defaults added (like `--runtime nvidia`, mounts a `/data` cache, and detects devices)
+> [`autotag`](/docs/run.md#autotag) finds a container image that's compatible with your version of JetPack/L4T - either locally, pulled from a registry, or by building it.
+
+To mount your own directories into the container, use the [`-v`](https://docs.docker.com/engine/reference/commandline/run/#volume) or [`--volume`](https://docs.docker.com/engine/reference/commandline/run/#volume) flags:
+```bash
+jetson-containers run -v /path/on/host:/path/in/container $(autotag speaches)
+```
+To launch the container running a command, as opposed to an interactive shell:
+```bash
+jetson-containers run $(autotag speaches) my_app --abc xyz
+```
+You can pass any options to it that you would to [`docker run`](https://docs.docker.com/engine/reference/commandline/run/), and it'll print out the full command that it constructs before executing it.
+
+
+BUILD CONTAINER
+
+
+If you use [`autotag`](/docs/run.md#autotag) as shown above, it'll ask to build the container for you if needed. To manually build it, first do the [system setup](/docs/setup.md), then run:
+```bash
+jetson-containers build speaches
+```
+The dependencies from above will be built into the container, and it'll be tested during. Run it with [`--help`](/jetson_containers/build.py) for build options.
+
diff --git a/packages/speech/speaches/model_aliases.json b/packages/speech/speaches/model_aliases.json
index a65b5d0ea..787a5d1d3 100644
--- a/packages/speech/speaches/model_aliases.json
+++ b/packages/speech/speaches/model_aliases.json
@@ -1,5 +1,6 @@
{
"tts-1": "hexgrad/Kokoro-82M",
"tts-1-hd": "hexgrad/Kokoro-82M",
+ "gpt-4o-mini-tts": "hexgrad/Kokoro-82M",
"whisper-1": "Systran/faster-whisper-large-v3"
-}
\ No newline at end of file
+}