-from typing import Dict, Callable
-from andromeda_chain import AndromedaChain
-from agents.flow_based import BaseAgent, BaseFlowAgent
-from flow.flow import Flow, PromptNode, ToolNode, ChoiceNode, StartNode
-from prompts.flow_guidance_cot import FlowChainOfThoughts, PROMPT_START_STRING
+from agents.base import BaseAgent
 from guidance_tooling.guidance_programs.tools import ingest_file
 from guidance_tooling.guidance_programs.tools import clean_text
-from guidance_tooling.guidance_programs.tools import load_tools
 
 import os
 from langchain.llms import LlamaCpp
-from dotenv import load_dotenv
 import os
-from chromadb.config import Settings
 from colorama import Fore, Style
 from langchain.chains import RetrievalQA
 from langchain.llms import LlamaCpp
@@ -25,58 +18,15 @@ def get_llm():
     global llm
     if llm is None:
         print("Loading guidance model...")
-        model_type = "LlamaCpp"
         model_path = "/home/karajan/Downloads/airoboros-13b-gpt4.ggmlv3.q8_0.bin"
         model_n_ctx = 1000
-        target_source_chunks = 4
         n_gpu_layers = 500
         use_mlock = 0
         n_batch = os.environ.get('N_BATCH') if os.environ.get('N_BATCH') != None else 512
         callbacks = []
-        qa_prompt = ""
         llm = LlamaCpp(model_path=model_path, n_ctx=model_n_ctx, callbacks=callbacks, verbose=False, n_gpu_layers=n_gpu_layers, use_mlock=use_mlock, top_p=0.9, n_batch=n_batch)
     return llm
-
-class ChainOfThoughtsFlowAgent(BaseFlowAgent):
-    def __init__(self, andromeda: AndromedaChain, tools: Dict[str, Callable[[str], str]]):
-        def execute_tool(variables):
-            action_name = variables["tool_name"]
-            action_input = variables["act_input"]
-            return self.do_tool(action_name, action_input)
-
-        start = StartNode("start", FlowChainOfThoughts.flow_prompt_start, {
-            "Action": "choose_action",
-            "Final Answer": "final_prompt"
-        })
-        thought = PromptNode("thought", FlowChainOfThoughts.thought_gen)
-        choose_action = PromptNode("choose_action", FlowChainOfThoughts.choose_action)
-        perform_action = PromptNode("perform_action", FlowChainOfThoughts.action_input)
-        execute_tool_node = ToolNode("execute_tool", execute_tool)
-        decide = ChoiceNode("decide", ["thought", "final_prompt"], max_decisions=3, force_exit_on="final_prompt")
-        final = PromptNode("final_prompt", FlowChainOfThoughts.final_prompt)
-
-        thought.set_next(choose_action)
-        choose_action.set_next(perform_action)
-        perform_action.set_next(execute_tool_node)
-        execute_tool_node.set_next(decide)
-
-        flow = Flow(
-            [start, thought, choose_action, perform_action, execute_tool_node, decide, final]
-        )
-
-        super().__init__(andromeda, tools, flow)
-        self.valid_tools = list(tools.keys())
-        self.valid_answers = ["Action", "Final Answer"]
-
-
-    def run(self, query: str) -> str:
-        return super().run(query, variables={
-            "prompt_start": PROMPT_START_STRING,
-            "question": query,
-            "valid_tools": self.valid_tools,
-            "valid_answers": self.valid_answers,
-        })
-
+
 class ChainOfThoughtsAgent(BaseAgent):
     def __init__(self, guidance, retriever, num_iter=3):
         self.guidance = guidance