
Commit 5413c13

added Analyze component
1 parent 39ea28c commit 5413c13

3 files changed, +33 −15 lines changed


src/hackingBuddyGPT/usecases/rag/common.py

Lines changed: 22 additions & 14 deletions
@@ -22,7 +22,7 @@
 @dataclass
 class ThesisPrivescPrototype(Agent):
     system: str = ""
-    enable_explanation: bool = False
+    enable_analysis: bool = False
     enable_update_state: bool = False
     enable_compressed_history: bool = False
     disable_history: bool = False
@@ -87,16 +87,14 @@ def perform_round(self, turn: int) -> bool:
         # log and output the command and its result
         if self._sliding_history:
             if self.enable_compressed_history:
-                self._sliding_history.add_command_only(cmd, result)
+                self._sliding_history.add_command_only(cmds, result)
             else:
-                self._sliding_history.add_command(cmd, result)
+                self._sliding_history.add_command(cmds, result)
 
         # analyze the result..
-        if self.enable_explanation:
-            self.analyze_result(cmd, result)
+        if self.enable_analysis:
+            self.analyze_result(cmds, result)
 
-        # Output Round Data.. # TODO: reimplement
-        # self.log.console.print(ui.get_history_table(self.enable_explanation, self.enable_update_state, self.log.run_id, self.log.log_db, turn))
 
         # if we got root, we can stop the loop
         return got_root
@@ -113,18 +111,25 @@ def get_structure_guidance_size(self) -> int:
         else:
             return 0
 
+    def get_analyze_size(self) -> int:
+        if self.enable_analysis:
+            return self.llm.count_tokens(self._analyze)
+        else:
+            return 0
+
     @log_conversation("Asking LLM for a new command...", start_section=True)
     def get_next_command(self) -> tuple[str, int]:
         history = ""
         if not self.disable_history:
             if self.enable_compressed_history:
-                history = self._sliding_history.get_commands_and_last_output(self._max_history_size - self.get_chain_of_thought_size() - self.get_structure_guidance_size())
+                history = self._sliding_history.get_commands_and_last_output(self._max_history_size - self.get_chain_of_thought_size() - self.get_structure_guidance_size() - self.get_analyze_size())
             else:
-                history = self._sliding_history.get_history(self._max_history_size - self.get_chain_of_thought_size() - self.get_structure_guidance_size())
+                history = self._sliding_history.get_history(self._max_history_size - self.get_chain_of_thought_size() - self.get_structure_guidance_size() - self.get_analyze_size())
 
         self._template_params.update({
             "history": history,
             'CoT': self._chain_of_thought,
+            'analyze': self._analyze,
             'guidance': self._structure_guidance
         })
 
@@ -162,13 +167,16 @@ def run_command(self, cmd, message_id) -> tuple[Optional[str], Optional[str], bo
 
     @log_conversation("Analyze its result...", start_section=True)
     def analyze_result(self, cmd, result):
-        state_size = self.get_state_size()
-        target_size = self.llm.context_size - llm_util.SAFETY_MARGIN - state_size
+        ctx = self.llm.context_size
 
-        # ugly, but cut down result to fit context size
+        template_size = self.llm.count_tokens(template_analyze.source)
+        target_size = ctx - llm_util.SAFETY_MARGIN - template_size  # - self.get_rag_size()
         result = llm_util.trim_result_front(self.llm, target_size, result)
-        answer = self.llm.get_response(template_analyze, cmd=cmd, resp=result, facts=self._state)
-        self.log.call_response(answer)
+
+        # result = self.llm.get_response(template_analyze, cmd=cmd, resp=result, rag_enabled=self.enable_rag, rag_text=self._rag_text, hint=self.hint)
+        result = self.llm.get_response(template_analyze, cmd=cmd, resp=result, hint=self.hint)
+        self._analyze = result.result
+        self.log.call_response(result)
 
     def split_into_multiple_commands(self, response: str):
         ret = self.split_with_delimiters(response, ["test_credential", "exec_command"])
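The main effect of this change on prompt construction is an extra term in the token budget: the sliding history handed to the next-command prompt now shrinks by the size of the stored analysis, and the analysis prompt itself is trimmed to the context size minus a safety margin and the analyze template. A minimal sketch of that arithmetic follows; the constants, numbers, and the count_tokens helper are illustrative stand-ins, not the repository's real llm_util API.

```python
# Minimal sketch of the budget arithmetic introduced by this commit.
# All names and numbers are illustrative stand-ins, not hackingBuddyGPT's real API.

CONTEXT_SIZE = 8192       # assumed model context window (tokens)
SAFETY_MARGIN = 128       # reserve so the reply still fits
MAX_HISTORY_SIZE = 4096   # assumed budget reserved for the command history


def count_tokens(text: str) -> int:
    # crude stand-in for llm.count_tokens(); the real code uses the model tokenizer
    return max(1, len(text) // 4)


def history_budget(chain_of_thought: str, guidance: str, analysis: str) -> int:
    # mirrors get_next_command(): every extra prompt component eats into the
    # history budget, and the new analysis text is just one more subtrahend
    return (MAX_HISTORY_SIZE
            - count_tokens(chain_of_thought)
            - count_tokens(guidance)
            - count_tokens(analysis))


def analyze_target_size(template_source: str) -> int:
    # mirrors analyze_result(): the command output is trimmed from the front
    # so that template plus output still fit under the context window
    return CONTEXT_SIZE - SAFETY_MARGIN - count_tokens(template_source)


if __name__ == "__main__":
    print(history_budget("think step by step", "", "the sudo version looks exploitable"))
    print(analyze_target_size("You executed the command '${cmd}' ..."))
```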

src/hackingBuddyGPT/usecases/rag/templates/analyze_cmd.txt

Lines changed: 4 additions & 1 deletion
@@ -4,4 +4,7 @@ You executed the command '${cmd}' and retrieved the following result:
 ${resp}
 ~~~
 
-Analyze if this response allows you to determine a way to escalate your privileges into a root shell. Be as concise as possible.
+Analyze if the output of the executed command allows you to determine a way to escalate your privileges into a root shell. If you find a command that can grant access to a root shell, emphasize it. If you think you found the admin password, advise using test_credential with root and the found password. If you don't think the output has any useful information, just say so and don't over-explain. Be as concise as possible.
+% if len(hint) != 0:
+You also have the additional information: ${hint}
+%endif
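The % if / ${...} control lines follow Mako-style template syntax, so the hint line only appears when a non-empty hint is passed in. Assuming these templates are rendered with Mako, a small standalone sketch (the inline template below is an abbreviated stand-in for the file, not its full content):

```python
from mako.template import Template

# Abbreviated stand-in for analyze_cmd.txt; assumes the project renders
# these templates with Mako (the % if / ${...} syntax matches).
TEMPLATE = """\
You executed the command '${cmd}' and retrieved the following result:
~~~
${resp}
~~~

Analyze if the output of the executed command allows you to escalate your privileges.
% if len(hint) != 0:
You also have the additional information: ${hint}
%endif
"""

tmpl = Template(TEMPLATE)

# With a hint, the conditional line is rendered ...
print(tmpl.render(cmd="sudo -l", resp="(ALL) NOPASSWD: /usr/bin/vim", hint="check GTFOBins for vim"))

# ... without one it disappears, but an (empty) hint must still be passed,
# because the template calls len(hint).
print(tmpl.render(cmd="id", resp="uid=1001(lowpriv)", hint=""))
```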

src/hackingBuddyGPT/usecases/rag/templates/query_next_command.txt

Lines changed: 7 additions & 0 deletions
@@ -16,6 +16,13 @@ ${history}
 Do not repeat already tried escalation attacks.
 %endif
 
+% if len(analyze) != 0:
+You also have the following analysis of the last command and its output:
+~~~
+${analyze}
+~~~
+%endif
+
 % if hint:
 You are provided the following guidance: ${hint}
 %endif
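As in analyze_cmd.txt, the new block is guarded by len(analyze), so get_next_command has to supply a string for analyze on every round; the diff always passes self._analyze, which presumably stays an empty string until the first analyze_result call (that initialization is not part of this commit). A short sketch of the resulting round-to-round flow, again with Mako as the assumed template engine and a stubbed LLM call:

```python
from mako.template import Template

# Abbreviated stand-in for query_next_command.txt (Mako assumed).
NEXT_CMD = Template("""\
${history}
% if len(analyze) != 0:
You also have the following analysis of the last command and its output:
~~~
${analyze}
~~~
%endif
""")


def fake_analysis_llm(cmd: str, output: str) -> str:
    # stand-in for the real analyze_result() LLM call
    return f"The output of '{cmd}' suggests checking sudo permissions."


analysis = ""  # empty before the first round, so the analysis block is skipped
history = ""
for cmd, output in [("id", "uid=1001(lowpriv)"), ("sudo -l", "(ALL) NOPASSWD: /usr/bin/vim")]:
    prompt = NEXT_CMD.render(history=history, analyze=analysis)
    # ... in the real agent, the LLM picks the next command from `prompt`,
    # the command is executed, and then:
    history += f"$ {cmd}\n{output}\n"
    analysis = fake_analysis_llm(cmd, output)  # feeds into the next round's prompt
```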
