diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 000000000..9b06692d7
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,22 @@
+# syntax=docker/dockerfile:1
+FROM nvidia/cuda:12.6.3-base-ubuntu24.04
+
+# Install Python; combine update+install and clean the apt lists in the same
+# layer so the package index is never baked into the image (keeps it small).
+RUN apt-get update && apt-get install -y --no-install-recommends \
+        python3 \
+        python3-pip \
+        python3-venv \
+    && rm -rf /var/lib/apt/lists/*
+
+# Create the virtual environment before copying sources so this layer stays
+# cached across source changes; prepending it to PATH "activates" it.
+RUN python3 -m venv /opt/venv
+ENV PATH="/opt/venv/bin:$PATH"
+
+# Copy application files
+WORKDIR /app
+COPY . /app
+
+# Install the package with CUDA extras; skip the pip cache to keep the layer small.
+RUN pip install --no-cache-dir -e .[cuda]
+
+CMD ["/bin/bash"]
diff --git a/README.md b/README.md
index 450e0bdad..f50764a7b 100644
--- a/README.md
+++ b/README.md
@@ -35,6 +35,13 @@
 git clone https://github.com/jwohlwend/boltz.git
 cd boltz; pip install -e .[cuda]
 ```
+
+or using Docker:
+
+```bash
+docker build -t boltz-2 .
+```
+
 
 If you are installing on CPU-only or non-CUDA GPus hardware, remove `[cuda]` from the above commands. Note that the CPU version is significantly slower than the GPU version.
 
@@ -45,6 +52,17 @@ You can run inference using Boltz with:
 ```
 boltz predict input_path --use_msa_server
 ```
 
+Using the docker image:
+
+```bash
+docker run \
+    -it --gpus all \
+    boltz-2:latest \
+    bash -c "boltz predict input_path --use_msa_server"
+```
+
+Make sure to install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) to run with GPU support.
+
 `input_path` should point to a YAML file, or a directory of YAML files for batched processing, describing the biomolecules you want to model and the properties you want to predict (e.g. affinity). To see all available options: `boltz predict --help` and for more information on these input formats, see our [prediction instructions](docs/prediction.md).
 By default, the `boltz` command will run the latest version of the model.