Skip to content

Commit 88627c5

Browse files
committed
update llama-cpp-python version + search function fix
1 parent 5d0cdaa commit 88627c5

File tree

4 files changed

+9
-10
lines changed

4 files changed

+9
-10
lines changed

app/Dockerfile

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,12 @@ ENV PYTHONPATH "${PYTHONPATH}:/code"
88
ENV BASE_URL="import.meta.env.VITE_BACKEND_BASE_URL"
99
ENV DOCKER_ENV=true
1010

11-
COPY ./requirements.txt /code/requirements.txt
12-
RUN LLAMA_CUBLAS=1 CMAKE_ARGS=-DLLAMA_CUBLAS=on FORCE_CMAKE=1 pip install llama-cpp-python --no-cache-dir --force-reinstall --verbose
11+
COPY ./requirements.txt /code/requirements_local.txt
1312
RUN pip3 install andromeda-chain==0.2.0 --no-deps
1413
RUN pip3 install git+https://github.com/Maximilian-Winter/guidance.git@313c726265c94523375b0dadd8954d19c01e709b
15-
RUN pip3 install -r requirements.txt
14+
RUN pip3 install -r requirements_local.txt
15+
RUN LLAMA_CUBLAS=1 CMAKE_ARGS=-DLLAMA_CUBLAS=on FORCE_CMAKE=1 pip install llama-cpp-python==0.1.65 --no-cache-dir --force-reinstall --verbose
16+
1617

1718
COPY . .
1819

app/agents/chain_of_thoughts.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -118,10 +118,8 @@ def run(self, query: str, context, history) -> str:
118118
self.context = context
119119
self.history = history
120120
prompt = self.guidance(self.prompt_template)
121-
if TEST_MODE =="ON":
122-
result = prompt(question=self.question, context = self.context, history= self.history,search=self.searchQA)
123-
else:
124-
result = prompt(question=self.question, context = self.context, history= self.history)
121+
122+
result = prompt(question=self.question, context = self.context, history= self.history, search=self.searchQA)
125123
return result
126124

127125

app/requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
llama-cpp-python==0.1.55
1+
llama-cpp-python==0.1.65
22
git+https://github.com/Maximilian-Winter/guidance.git@313c726265c94523375b0dadd8954d19c01e709b
33
andromeda-chain==0.2.0
44
langchain

app/setup.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#!/bin/bash
22

33
# Install llama-cpp-python with CUBLAS ON to allow gpu offloading in local
4-
LLAMA_CUBLAS=1 pip install llama-cpp-python --force-reinstall --verbose
4+
LLAMA_CUBLAS=1 pip install llama-cpp-python==0.1.65 --force-reinstall --verbose
55

66
# Install experimental cpp guidance package
77
pip install git+https://github.com/Maximilian-Winter/guidance.git@313c726265c94523375b0dadd8954d19c01e709b
@@ -10,4 +10,4 @@ pip install git+https://github.com/Maximilian-Winter/guidance.git@313c726265c945
1010
pip install andromeda-chain==0.2.0 --no-deps
1111

1212
# Install packages from requirements_local.txt
13-
pip install -r requirements_local.txt
13+
pip install -r requirements_local.txt

0 commit comments

Comments
 (0)