Dockerfile.rocm.ubi
## Global Args ##################################################################
ARG BASE_UBI_IMAGE_TAG=9.4
ARG PYTHON_VERSION=3.12
# Default ROCm ARCHes to build vLLM for.
ARG PYTORCH_ROCM_ARCH="gfx908;gfx90a;gfx942;gfx1100"
ARG MAX_JOBS=12
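# Example build invocation (illustrative; the image tag and build-arg values are placeholders,
# adjust them for your environment):
#   docker build -f Dockerfile.rocm.ubi \
#     --build-arg PYTORCH_ROCM_ARCH="gfx90a;gfx942" \
#     --build-arg MAX_JOBS=8 \
#     -t vllm-rocm-ubi:dev .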
FROM registry.access.redhat.com/ubi9/ubi-minimal:${BASE_UBI_IMAGE_TAG} AS base
ARG PYTHON_VERSION
ENV VIRTUAL_ENV=/opt/vllm
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN --mount=type=cache,target=/root/.cache/pip \
    microdnf -y update && \
    microdnf install -y --setopt=install_weak_deps=0 --nodocs \
        python${PYTHON_VERSION}-devel \
        python${PYTHON_VERSION}-pip \
        python${PYTHON_VERSION}-wheel && \
    python${PYTHON_VERSION} -m venv $VIRTUAL_ENV && \
    pip install -U pip wheel setuptools uv && \
    microdnf clean all
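##################################################################################################
# ROCm base layer: AMD GPU/ROCm RPM repositories, ROCm-enabled PyTorch nightly wheels,
# and the shared-library search paths used by all later stages.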
FROM base AS rocm_base
ARG ROCM_VERSION=6.2.3
ARG PYTHON_VERSION
ARG BASE_UBI_IMAGE_TAG
RUN printf "[amdgpu]\n\
name=amdgpu\n\
baseurl=https://repo.radeon.com/amdgpu/${ROCM_VERSION}/rhel/${BASE_UBI_IMAGE_TAG}/main/x86_64/\n\
enabled=1\n\
priority=50\n\
gpgcheck=1\n\
gpgkey=https://repo.radeon.com/rocm/rocm.gpg.key\n\
[ROCm-${ROCM_VERSION}]\n\
name=ROCm${ROCM_VERSION}\n\
baseurl=https://repo.radeon.com/rocm/rhel9/${ROCM_VERSION}/main\n\
enabled=1\n\
priority=50\n\
gpgcheck=1\n\
gpgkey=https://repo.radeon.com/rocm/rocm.gpg.key" > /etc/yum.repos.d/amdgpu.repo
RUN --mount=type=cache,target=/root/.cache/pip \
    --mount=type=cache,target=/root/.cache/uv \
    export version="$(awk -F. '{print $1"."$2}' <<< $ROCM_VERSION)" && \
    uv pip install --pre \
        --index-url "https://download.pytorch.org/whl/nightly/rocm${version}" \
        torch==2.6.0.dev20241107+rocm${version} \
        torchvision==0.20.0.dev20241107+rocm${version} && \
    # Install libdrm-amdgpu to avoid errors when retrieving device information (amdgpu.ids: No such file or directory)
    microdnf install -y libdrm-amdgpu && \
    microdnf clean all
ENV LD_LIBRARY_PATH="$VIRTUAL_ENV/lib/python${PYTHON_VERSION}/site-packages/numpy.libs:$LD_LIBRARY_PATH"
ENV LD_LIBRARY_PATH="$VIRTUAL_ENV/lib/python${PYTHON_VERSION}/site-packages/pillow.libs:$LD_LIBRARY_PATH"
ENV LD_LIBRARY_PATH="$VIRTUAL_ENV/lib/python${PYTHON_VERSION}/site-packages/triton/backends/amd/lib:$LD_LIBRARY_PATH"
ENV LD_LIBRARY_PATH="$VIRTUAL_ENV/lib/python${PYTHON_VERSION}/site-packages/torch/lib:$LD_LIBRARY_PATH"
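# Register the wheel-bundled shared libraries with the dynamic linker so they resolve system-wide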
RUN echo $LD_LIBRARY_PATH | tr : \\n >> /etc/ld.so.conf.d/torch-venv.conf && \
    ldconfig
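##################################################################################################
# Development image: ROCm build-time packages, ccache, and git for compiling vLLM and its kernels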
FROM rocm_base AS rocm_devel
ENV CCACHE_DIR=/root/.cache/ccache
RUN rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm && \
    rpm -ql epel-release && \
    microdnf -y update && \
    microdnf -y install \
        ccache \
        git \
        # packages required to build vllm
        amd-smi \
        hipblas-devel \
        hipblaslt-devel \
        hipcc \
        hipcub-devel \
        hipfft-devel \
        hiprand-devel \
        hipsolver-devel \
        hipsparse-devel \
        hsa-rocr-devel \
        miopen-hip-devel \
        rccl-devel \
        rocblas-devel \
        rocm-device-libs \
        rocprim-devel \
        rocrand-devel \
        rocthrust-devel \
        # end packages required to build vllm
        wget \
        which && \
    microdnf clean all
WORKDIR /workspace
ENV LLVM_SYMBOLIZER_PATH=/opt/rocm/llvm/bin/llvm-symbolizer
ENV PATH=$PATH:/opt/rocm/bin
ENV CPLUS_INCLUDE_PATH=$VIRTUAL_ENV/lib/python${PYTHON_VERSION}/site-packages/torch/include:/opt/rocm/include
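##################################################################################################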
FROM rocm_devel AS build_amdsmi
# Build AMD SMI wheel
RUN cd /opt/rocm/share/amd_smi && \
    python3 -m pip wheel . --wheel-dir=/install
##################################################################################################
FROM rocm_devel AS build_flashattention
ARG FA_GFX_ARCHS="gfx90a;gfx942"
# the FA_BRANCH commit belongs to the ROCm/flash-attention fork, `main_perf` branch
ARG FA_BRANCH="3cea2fb"
ARG MAX_JOBS
ENV MAX_JOBS=${MAX_JOBS}
RUN --mount=type=cache,target=/root/.cache/uv \
    --mount=type=cache,target=/workspace/build \
    mkdir -p /libs && \
    cd /libs && \
    git clone https://github.com/ROCm/flash-attention.git && \
    cd flash-attention && \
    git checkout ${FA_BRANCH} && \
    git submodule update --init && \
    uv pip install cmake ninja packaging && \
    env \
        GPU_ARCHS="${FA_GFX_ARCHS}" \
        python3 setup.py bdist_wheel --dist-dir=/install
##################################################################################################
FROM rocm_devel AS build_vllm
ARG PYTORCH_ROCM_ARCH
ARG PYTHON_VERSION
ARG MAX_JOBS
ENV MAX_JOBS=${MAX_JOBS}
ENV PYTORCH_ROCM_ARCH=${PYTORCH_ROCM_ARCH}
COPY . .
ENV VLLM_TARGET_DEVICE="rocm"
# Make sure punica kernels are built (for LoRA)
ENV VLLM_INSTALL_PUNICA_KERNELS=1
RUN --mount=type=cache,target=/root/.cache/ccache \
    --mount=type=cache,target=/root/.cache/pip \
    --mount=type=cache,target=/root/.cache/uv \
    uv pip install -v -U \
        ninja "setuptools-scm>=8" "cmake>=3.26" packaging && \
    env CFLAGS="-march=haswell" \
        CXXFLAGS="-march=haswell" \
        CMAKE_BUILD_TYPE=Release \
        python3 setup.py bdist_wheel --dist-dir=dist
#################### libsodium Build IMAGE ####################
FROM rocm_base AS libsodium-builder
RUN microdnf install -y gcc gzip tar \
    && microdnf clean all
WORKDIR /usr/src/libsodium
ARG LIBSODIUM_VERSION=1.0.20
RUN curl -LO https://github.com/jedisct1/libsodium/releases/download/${LIBSODIUM_VERSION}-RELEASE/libsodium-${LIBSODIUM_VERSION}.tar.gz \
    && tar -xzvf libsodium*.tar.gz \
    && rm -f libsodium*.tar.gz \
    && mv libsodium*/* ./
RUN CFLAGS="-O3 -Wall -Werror=format-security -Wno-unused-function -Wp,-D_GLIBCXX_ASSERTIONS -fstack-protector-strong -fstack-clash-protection -fcf-protection" \
./configure \
--prefix="/usr/" \
--libdir=/usr/lib64 && \
make -j $(nproc) && \
make check
##################################################################################################
FROM rocm_base AS vllm-openai
ARG MAX_JOBS
# ROCM_VERSION must be redeclared in this stage so the pip extra-index URL below resolves to the right ROCm build
ARG ROCM_VERSION=6.2.3
WORKDIR /workspace
ENV VIRTUAL_ENV=/opt/vllm
ENV PATH=$VIRTUAL_ENV/bin:$PATH
# Required for triton
RUN microdnf install -y --setopt=install_weak_deps=0 --nodocs gcc rsync && \
    microdnf clean all
# Install libsodium for Tensorizer encryption
RUN --mount=type=bind,from=libsodium-builder,src=/usr/src/libsodium,target=/usr/src/libsodium \
    cd /usr/src/libsodium \
    && make install
RUN --mount=type=bind,from=build_amdsmi,src=/install,target=/install/amdsmi/ \
    --mount=type=bind,from=build_flashattention,src=/install,target=/install/flashattention \
    --mount=type=bind,from=build_vllm,src=/workspace/dist,target=/install/vllm/ \
    --mount=type=cache,target=/root/.cache/pip \
    --mount=type=cache,target=/root/.cache/uv \
    export version="$(awk -F. '{print $1"."$2}' <<< $ROCM_VERSION)" && \
    uv pip install \
        --index-strategy=unsafe-best-match \
        --extra-index-url "https://download.pytorch.org/whl/nightly/rocm${version}" \
        /install/amdsmi/*.whl \
        /install/flashattention/*.whl \
        /install/vllm/*.whl
ENV HF_HUB_OFFLINE=1 \
    HOME=/home/vllm \
    # Allow requested max length to exceed what is extracted from the
    # config.json
    # see: https://github.com/vllm-project/vllm/pull/7080
    VLLM_ALLOW_LONG_MAX_MODEL_LEN=1 \
    VLLM_USAGE_SOURCE=production-docker-image \
    VLLM_WORKER_MULTIPROC_METHOD=fork \
    VLLM_NO_USAGE_STATS=1 \
    # Silences the HF Tokenizers warning
    TOKENIZERS_PARALLELISM=false \
    RAY_EXPERIMENTAL_NOSET_ROCR_VISIBLE_DEVICES=1 \
    VLLM_USE_TRITON_FLASH_ATTN=0 \
    HIP_FORCE_DEV_KERNARG=1 \
    OUTLINES_CACHE_DIR=/tmp/outlines \
    NUMBA_CACHE_DIR=/tmp/numba \
    TRITON_CACHE_DIR=/tmp/triton
# setup non-root user for OpenShift
RUN umask 002 && \
    useradd --uid 2000 --gid 0 vllm && \
    mkdir -p /licenses /home/vllm && \
    chmod g+rwx /home/vllm
COPY LICENSE /licenses/vllm.md
COPY examples/*.jinja /app/data/template/
USER 2000
WORKDIR /home/vllm
ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"]
##################################################################################################
# gRPC adapter image: layers vllm-tgis-adapter on top of the OpenAI server image
FROM vllm-openai AS vllm-grpc-adapter
USER root
RUN --mount=type=cache,target=/root/.cache/pip \
    --mount=type=cache,target=/root/.cache/uv \
    --mount=type=bind,from=build_vllm,src=/workspace/dist,target=/install/vllm/ \
    HOME=/root uv pip install /install/vllm/*.whl vllm-tgis-adapter==0.5.3
ENV GRPC_PORT=8033 \
    PORT=8000 \
    # As an optimization, vLLM disables logprobs when using spec decoding by
    # default, but this would be unexpected to users of a hosted model that
    # happens to have spec decoding enabled
    # see: https://github.com/vllm-project/vllm/pull/6485
    DISABLE_LOGPROBS_DURING_SPEC_DECODING=false
USER 2000
ENTRYPOINT ["python3", "-m", "vllm_tgis_adapter", "--uvicorn-log-level=warning"]