Add vLLM support
* run on larger GH Actions runner and print disk info

* add vLLM support by modifying the Dockerfile
chrisjkuch committed Feb 7, 2024 · 1 parent 3a6c8a2 · commit 7a0b601
Showing 7 changed files with 607 additions and 1,337 deletions.
6 changes: 5 additions & 1 deletion .github/workflows/build.yml
@@ -21,7 +21,7 @@ permissions:
 jobs:
   build:
     name: Build
-    runs-on: ubuntu-latest
+    runs-on: snomed-ubuntu-22.04-8core
     strategy:
       matrix:
         proc: ["cpu", "gpu"]
@@ -40,6 +40,10 @@ jobs:
     steps:
       - uses: actions/checkout@v4
 
+      - name: Disk information
+        run: |
+          df -h
+
       - name: Build Image
         run: |
           docker build runtime \
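For context, a plausible shape of the truncated build command above, assuming the workflow forwards the cpu/gpu matrix value to Docker as a build argument (the flag wiring and image tag here are assumptions, not shown in the diff):

    # Hypothetical sketch: pass the matrix value through to the Dockerfile's CPU_OR_GPU build arg
    docker build runtime \
      --build-arg CPU_OR_GPU=${{ matrix.proc }} \
      --tag runtime:${{ matrix.proc }}

The Dockerfile change below consumes this same CPU_OR_GPU value to select the matching vLLM wheel.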
10 changes: 9 additions & 1 deletion runtime/Dockerfile
@@ -8,7 +8,8 @@ ENV DEBIAN_FRONTEND=noninteractive \
     LANG=C.UTF-8 \
     LC_ALL=C.UTF-8 \
     PYTHONUNBUFFERED=1 \
-    SHELL=/bin/bash
+    SHELL=/bin/bash \
+    CPU_OR_GPU=${CPU_OR_GPU}
 
 COPY apt.txt apt.txt
 RUN apt-get update --fix-missing \
@@ -21,6 +22,13 @@ COPY --chown=$MAMBA_USER:$MAMBA_USER conda-lock-${CPU_OR_GPU}.yml /tmp/conda-lock.yml
 RUN micromamba install --name base --yes --file /tmp/conda-lock.yml && \
     micromamba clean --all --force-pkgs-dirs --yes
 
+ARG MAMBA_DOCKERFILE_ACTIVATE=1
+RUN if [ "$CPU_OR_GPU" = "gpu" ]; then \
+        pip install https://github.com/vllm-project/vllm/releases/download/v0.3.0/vllm-0.3.0+cu118-cp310-cp310-manylinux1_x86_64.whl --no-cache-dir ; \
+    else \
+        pip install https://github.com/vllm-project/vllm/releases/download/v0.3.0/vllm-0.3.0-cp310-cp310-manylinux1_x86_64.whl --no-cache-dir ; \
+    fi
+
 RUN mkdir /code_execution
 RUN chown -R ${MAMBA_USER}:${MAMBA_USER} /code_execution
 
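Once the image builds, a quick sanity check that the wheel actually installed (illustrative only; the image tag is an assumption):

    # Hypothetical check: import vLLM inside the gpu image and print its version
    docker run --rm runtime:gpu python -c "import vllm; print(vllm.__version__)"

The same import should also succeed in the cpu image against the CPU wheel.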
