@@ -15,6 +15,11 @@ class LLMResult:
     tokens_query: int = 0
     tokens_response: int = 0
 
+
+TPL_NEXT = Template(filename='templates/query_next_command.txt')
+TPL_ANALYZE = Template(filename="templates/analyze_cmd.txt")
+TPL_STATE = Template(filename="templates/update_state.txt")
+
 class LLMWithState:
     def __init__(self, run_id, llm_connection, history, config):
         self.llm_connection = llm_connection
@@ -35,13 +40,10 @@ def get_state_size(self, model):
 
     def get_next_cmd(self):
 
-        template_file = 'query_next_command.txt'
         model = self.llm_connection.get_model()
 
         state_size = self.get_state_size(model)
-
-        template = Template(filename='templates/' + template_file)
-        template_size = num_tokens_from_string(model, template.source)
+        template_size = num_tokens_from_string(model, TPL_NEXT.source)
 
         history = get_cmd_history_v3(model, self.llm_connection.get_context_size(), self.run_id, self.db, state_size + template_size)
 
@@ -50,7 +52,7 @@ def get_next_cmd(self):
         else:
             target_user = "Administrator"
 
-        return self.create_and_ask_prompt_text(template_file, history=history, state=self.state, target=self.target, update_state=self.enable_update_state, target_user=target_user)
+        return self.create_and_ask_prompt_text(TPL_NEXT, history=history, state=self.state, target=self.target, update_state=self.enable_update_state, target_user=target_user)
 
     def analyze_result(self, cmd, result):
 
@@ -66,20 +68,19 @@ def analyze_result(self, cmd, result):
             result = result[cut_off:]
             current_size = num_tokens_from_string(model, result)
 
-        result = self.create_and_ask_prompt_text('analyze_cmd.txt', cmd=cmd, resp=result, facts=self.state)
+        result = self.create_and_ask_prompt_text(TPL_ANALYZE, cmd=cmd, resp=result, facts=self.state)
         return result
 
     def update_state(self, cmd, result):
-        result = self.create_and_ask_prompt_text('update_state.txt', cmd=cmd, resp=result, facts=self.state)
+        result = self.create_and_ask_prompt_text(TPL_STATE, cmd=cmd, resp=result, facts=self.state)
         self.state = result.result
         return result
 
     def get_current_state(self):
         return self.state
 
-    def create_and_ask_prompt_text(self, template_file, **params):
+    def create_and_ask_prompt_text(self, template, **params):
         # prepare the prompt
-        template = Template(filename='templates/' + template_file)
         prompt = template.render(**params)
 
         if not self.llm_connection.get_model().startswith("gpt-"):
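Net effect of the commit: the three Mako templates are compiled once at module import instead of being re-parsed on every call, and create_and_ask_prompt_text now receives a Template object rather than a filename. A minimal sketch of that pattern, with the template text inlined so it runs standalone (build_prompt and the example parameters are illustrative, not part of this codebase; the commit loads the real files from templates/):

from mako.template import Template

# Compiled once at import time, then reused on every call instead of
# being reconstructed per request.
TPL_EXAMPLE = Template("Next command against ${target} as ${target_user}.")

def build_prompt(template, **params):
    # .source exposes the raw template text (this is what get_next_cmd
    # feeds to num_tokens_from_string for its token budget);
    # .render(**params) fills in the placeholders.
    print(f"template is {len(template.source)} chars before rendering")
    return template.render(**params)

print(build_prompt(TPL_EXAMPLE, target="10.0.2.5", target_user="root"))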