'''
This file implements prompt templates for Llama-based models.

Modify the prompt template based on the model you select;
this seems to have a significant impact on the output of the LLM.
'''

from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

# this is specific to Llama-2.
system_prompt = """You are a helpful assistant, you will use the provided context to answer user questions.
|
|
Read the given context before answering questions and think step by step. If you can not answer a user question based on
|
|
the provided context, inform the user. Do not use any other information for answering user"""
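
# For illustration only: with promptTemplate_type="llama" and history=False, the
# template assembled below ends up shaped roughly like this (whitespace simplified):
#
#   [INST]<<SYS>>
#   {system_prompt}
#   <</SYS>>
#
#   Context: {context}
#   User: {question}[/INST]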


def get_prompt_template(system_prompt=system_prompt, promptTemplate_type=None, history=False):
    if promptTemplate_type == "llama":
        # Llama-2 chat format: the system prompt is wrapped in <<SYS>> tags and
        # the whole turn is wrapped in [INST] ... [/INST].
        B_INST, E_INST = "[INST]", "[/INST]"
        B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
        SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
        if history:
            instruction = """
            Context: {history} \n {context}
            User: {question}"""

            prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
        else:
            instruction = """
            Context: {context}
            User: {question}"""

            prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
    else:
        # change this based on the model you have selected.
        if history:
            prompt_template = system_prompt + """

            Context: {history} \n {context}
            User: {question}
            Answer:"""
            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
        else:
            prompt_template = system_prompt + """

            Context: {context}
            User: {question}
            Answer:"""
            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)

    # The memory stores prior turns under the "history" key so the {history}
    # placeholder in the template can be filled in when chat history is enabled.
    memory = ConversationBufferMemory(input_key="question", memory_key="history")

    return prompt, memory
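

# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how this helper might be exercised on its own, assuming
# only the imports above; the run_localGPT scripts are expected to wire the
# returned prompt and memory into their QA chain instead.
if __name__ == "__main__":
    prompt, memory = get_prompt_template(promptTemplate_type="llama", history=True)
    # Fill the placeholders with dummy values to inspect the final prompt string.
    print(
        prompt.format(
            history="(previous turns would appear here)",
            context="(retrieved document chunks would appear here)",
            question="What does the document say about GGUF models?",
        )
    )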