Skip to content

Commit af2c8fe

Browse files
committed
wrap llama2 prompts to get better results
1 parent d564e4f commit af2c8fe

File tree

1 file changed

+18
-2
lines changed

1 file changed

+18
-2
lines changed

‎llm_with_state.py‎

Lines changed: 18 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -89,9 +89,25 @@ def get_current_state(self):
8989
return self.state
9090

9191
def create_and_ask_prompt_text(self, template_file, **params):
    """Render a prompt template, send it to the configured LLM, and time it.

    Args:
        template_file: filename (relative to ``templates/``) of the mako
            template to render.
        **params: keyword arguments forwarded to ``Template.render``.

    Returns:
        LLMResult: the model answer together with the rendered prompt,
        wall-clock duration of the query, and token counts.
    """
    # prepare the prompt from its mako template
    template = Template(filename='templates/' + template_file)
    prompt = template.render(**params)

    # hoist the model name: it is needed both for the wrapper decision
    # and for the query itself (the original fetched it twice)
    model = self.llm_connection.get_model()

    # non-OpenAI models (llama2 and friends) give noticeably better
    # results when the prompt is wrapped in the System/User/Assistant
    # instruction format
    if not model.startswith("gpt-"):
        prompt = wrap_it_for_llama(prompt)

    # execute the query, timing only the LLM round-trip
    tic = time.perf_counter()
    result, tok_query, tok_res = self.llm_connection.exec_query(model, self.llm_connection.get_context_size(), prompt)
    toc = time.perf_counter()

    # NOTE(review): `result` is passed twice on purpose here, matching the
    # original call — presumably (answer, prompt, raw_answer, ...); confirm
    # against LLMResult's constructor signature
    return LLMResult(result, prompt, result, toc - tic, tok_query, tok_res)
105+
106+
def wrap_it_for_llama(prompt):
    """Wrap *prompt* in the System/User/Assistant chat format used by llama2.

    Args:
        prompt: the raw, already-rendered prompt text.

    Returns:
        str: the prompt embedded in an instruction template with a fixed
        system message, ending with an open ``### Assistant:`` turn for the
        model to complete.
    """
    # fix: original system message contained the typo "helful"
    return f"""### System:
you are a concise but helpful learning tool that aids students trying to find security vulnerabilities

### User:
{prompt}

### Assistant:"""

0 commit comments

Comments
 (0)