Commit 41a8134

Merge pull request #27 from ipa-lab/andreashappe-patch-1
update minimal example
2 parents: 67664c7 + 19962af

1 file changed: README.md (14 additions, 19 deletions)

~~~~ diff
@@ -100,29 +100,35 @@ This work is partially based upon our empiric research into [how hackers work](h
 The following creates a new (minimal) Linux privilege-escalation agent. Because it builds upon our infrastructure, it already supports configurable LLM connections (e.g., for testing OpenAI or locally run LLMs), logs trace data to a local sqlite database for each run, enforces a round limit (after which the agent stops if root has not been achieved), and can connect to a Linux target over SSH for fully autonomous command execution (as well as password guessing).

 ~~~ python
-template_dir = pathlib.Path(__file__).parent / "templates"
-template_next_cmd = Template(filename=str(template_dir / "query_next_command.txt"))
+template_dir = pathlib.Path(__file__).parent
+template_next_cmd = Template(filename=str(template_dir / "next_cmd.txt"))

-@use_case("linux_privesc", "Linux Privilege Escalation")
+@use_case("minimal_linux_privesc", "Showcase Minimal Linux Priv-Escalation")
 @dataclass
-class LinuxPrivesc(RoundBasedUseCase, UseCase, abc.ABC):
+class MinimalLinuxPrivesc(RoundBasedUseCase, UseCase, abc.ABC):

-    system: str = 'linux'
+    conn: SSHConnection = None

     _sliding_history: SlidingCliHistory = None
     _capabilities: Dict[str, Capability] = field(default_factory=dict)

     def init(self):
         super().init()
-        # provide a shell history of limited size to the LLM
         self._sliding_history = SlidingCliHistory(self.llm)
+        self._capabilities["run_command"] = SSHRunCommand(conn=self.conn)
+        self._capabilities["test_credential"] = SSHTestCredential(conn=self.conn)
+        self._template_size = self.llm.count_tokens(template_next_cmd.source)

     def perform_round(self, turn):
         got_root : bool = False

         with self.console.status("[bold green]Asking LLM for a new command..."):
-            answer = self.get_next_command()
-            cmd = answer.result
+            # get as much history as fits into the target context size
+            history = self._sliding_history.get_history(self.llm.context_size - llm_util.SAFETY_MARGIN - self._template_size)
+
+            # get the next command from the LLM
+            answer = self.llm.get_response(template_next_cmd, _capabilities=self._capabilities, history=history, conn=self.conn)
+            cmd = llm_util.cmd_output_fixer(answer.result)

         with self.console.status("[bold green]Executing that command..."):
             if answer.result.startswith("test_credential"):
@@ -136,19 +142,8 @@ class LinuxPrivesc(RoundBasedUseCase, UseCase, abc.ABC):
         self._sliding_history.add_command(cmd, result)
         self.console.print(Panel(result, title=f"[bold cyan]{cmd}"))

-        # Output Round Data..
-        self.console.print(ui.get_history_table(False, False, self._run_id, self.log_db, turn))
-
         # if we got root, we can stop the loop
         return got_root
-
-    def get_next_command(self):
-        template_size = self.llm.count_tokens(template_next_cmd.source)
-        history = self._sliding_history.get_history(self.llm.context_size - llm_util.SAFETY_MARGIN - template_size)
-
-        cmd = self.llm.get_response(template_next_cmd, _capabilities=self._capabilities, history=history, conn=self.conn, system=self.system, target_user="root")
-        cmd.result = llm_util.cmd_output_fixer(cmd.result)
-        return cmd
 ~~~

 ## Setup and Usage
~~~~
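
For context on `perform_round` in the new version: before each query, the agent asks its `SlidingCliHistory` for only as much prior shell output as fits into the model's context window (`self.llm.context_size` minus `llm_util.SAFETY_MARGIN` and the rendered template's token count). The snippet below is a minimal sketch of that token-budgeting idea, assuming only a `count_tokens` callable; the `TokenBudgetHistory` name and its details are hypothetical and not the project's actual `SlidingCliHistory` implementation.

~~~ python
# Hypothetical sketch only -- not hackingBuddyGPT's SlidingCliHistory.
class TokenBudgetHistory:
    def __init__(self, count_tokens):
        self.count_tokens = count_tokens   # callable taking a str and returning its token count (assumed)
        self.entries: list[str] = []       # "$ cmd\noutput" blocks, oldest first

    def add_command(self, cmd: str, result: str) -> None:
        # remember each executed command together with its output
        self.entries.append(f"$ {cmd}\n{result}")

    def get_history(self, token_budget: int) -> str:
        # walk backwards from the newest entry and keep whatever still fits the budget
        kept: list[str] = []
        used = 0
        for entry in reversed(self.entries):
            cost = self.count_tokens(entry)
            if used + cost > token_budget:
                break
            kept.append(entry)
            used += cost
        return "\n".join(reversed(kept))

# usage with a crude token estimate (roughly 4 characters per token):
history = TokenBudgetHistory(count_tokens=lambda text: len(text) // 4)
history.add_command("id", "uid=1000(lowpriv) gid=1000(lowpriv)")
prompt_history = history.get_history(token_budget=4096)
~~~

Dropping the oldest entries first keeps the most recent command output, which is usually the most relevant for choosing the next step, inside the prompt.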

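The `template_next_cmd` rendered by `get_response` is a Mako template (`next_cmd.txt`), and the keyword arguments passed to `get_response` (`_capabilities`, `history`, `conn`) become the variables available inside it. The sketch below only illustrates what such a prompt template could look like: the wording is invented, each capability is rendered via its plain string representation, and the `conn` variable is left unused, so treat it as an assumption-laden illustration rather than the repository's actual template.

~~~ python
# Invented prompt wording -- not the repository's next_cmd.txt.
from mako.template import Template

next_cmd_sketch = Template("""\
You are a low-privileged user on a Linux system. Your goal is to become the
root user by executing exactly one shell command per round.

You can use the following capabilities:
% for name, capability in _capabilities.items():
- ${name}: ${capability}
% endfor

Previously executed commands and their results:
${history}

State your next command.
""")

# quick check with dummy values (the real agent passes its capability objects
# and the sliding history computed in perform_round):
print(next_cmd_sketch.render(
    _capabilities={"run_command": "execute a single shell command on the target"},
    history="$ id\nuid=1000(lowpriv) gid=1000(lowpriv) groups=1000(lowpriv)",
))
~~~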