mirror of
https://github.com/zebrajr/localGPT.git
synced 2025-12-06 12:20:53 +01:00
add llama-cpp-python to Dockerfile
because it's not present in the requirements.txt but is required for loading the LLaMa 2 model.
This commit is contained in:
parent
15e96488b6
commit
417b7e606e
|
|
@ -8,7 +8,7 @@ RUN apt-get install -y g++-11 make python3 python-is-python3 pip
|
|||
# only copy what's needed at every step to optimize layer cache
|
||||
COPY ./requirements.txt .
|
||||
# use BuildKit cache mount to drastically reduce redownloading from pip on repeated builds
|
||||
RUN --mount=type=cache,target=/root/.cache CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --timeout 100 -r requirements.txt
|
||||
RUN --mount=type=cache,target=/root/.cache CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --timeout 100 -r requirements.txt llama-cpp-python==0.1.83
|
||||
COPY SOURCE_DOCUMENTS ./SOURCE_DOCUMENTS
|
||||
COPY ingest.py constants.py ./
|
||||
# Docker BuildKit does not support GPU during *docker build* time right now, only during *docker run*.
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user