V3 #2 (merged)

Changes from 1 commit (of 22 commits in this pull request)
add some animations (:
andreashappe committed Sep 14, 2023
commit 2f7e70e6043d5889c0e6bc2beaba0333334f8ce5
llm_with_state.py: 6 changes (2 additions, 4 deletions)

@@ -32,10 +32,8 @@ def get_next_cmd(self):
 
     def analyze_result(self, cmd, result):
         result = self.create_and_ask_prompt('successfull.txt', cmd=cmd, resp=result, facts=self.state)
-
-        print("new state: " + str(result.result["facts"]))
-
+        self.tmp_state = result.result["facts"]
 
         return result
 
     def update_state(self):
@@ -51,7 +49,7 @@ def create_and_ask_prompt(self, template_file, **params):
         tic = time.perf_counter()
         result, tok_query, tok_res = self.llm_connection.exec_query(prompt)
         toc = time.perf_counter()
-        print(str(result))
+        print("debug[the plain result]: " + str(result))
         json_answer = json.loads(result)
 
         return LLMResult(json_answer, toc-tic, tok_query, tok_res)
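For context: create_and_ask_prompt() times each LLM query and expects the model to answer in JSON. A minimal sketch of that pattern, assuming a hypothetical llm_connection object; only the .result field of LLMResult is confirmed by the diff, the remaining field names are guesses:

# Sketch of the query/timing/parse pattern above. exec_query() and the
# field names other than .result are assumptions, not the project's API.
import json
import time
from dataclasses import dataclass

@dataclass
class LLMResult:
    result: dict          # parsed JSON answer, e.g. result["facts"]
    duration: float       # wall-clock seconds the query took
    tokens_query: int     # tokens sent to the model
    tokens_response: int  # tokens received back

def ask(llm_connection, prompt):
    tic = time.perf_counter()
    # exec_query() is assumed to return (raw_text, query_tokens, response_tokens)
    result, tok_query, tok_res = llm_connection.exec_query(prompt)
    toc = time.perf_counter()
    json_answer = json.loads(result)  # raises JSONDecodeError if the model strays from JSON
    return LLMResult(json_answer, toc - tic, tok_query, tok_res)

Keeping the parsed answer together with token counts and wall-clock time makes it cheap to log every round to the database later on.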
wintermute.py: 34 changes (19 additions, 15 deletions)

@@ -17,15 +17,15 @@
 # setup dotenv
 load_dotenv()
 
-# setup in-memory storage
+# setup in-memory storage for command history
 db = DbStorage()
 db.connect()
 db.setup_db()
 
 # create an identifier for this session/run
 run_id = db.create_new_run(os.getenv("MODEL"), os.getenv("CONTEXT_SIZE"))
 
-# setup some infrastructure
+# setup some infrastructure for outputting information
 console = Console()
 
 # open SSH connection to target
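Both edits in this hunk are comment-only. For orientation, the DbStorage calls being documented (connect(), setup_db(), create_new_run()) might look roughly like this hypothetical sqlite-backed stand-in; the real class and schema in the repository will differ:

# Hypothetical stand-in for DbStorage, covering only the calls visible
# in this diff; class internals and schema are assumptions.
import sqlite3

class DbStorage:
    def connect(self, path=":memory:"):
        self.db = sqlite3.connect(path)

    def setup_db(self):
        self.db.execute(
            "CREATE TABLE IF NOT EXISTS runs"
            " (id INTEGER PRIMARY KEY, model TEXT, context_size TEXT)"
        )

    def create_new_run(self, model, context_size):
        cur = self.db.execute(
            "INSERT INTO runs (model, context_size) VALUES (?, ?)",
            (model, context_size),
        )
        self.db.commit()
        return cur.lastrowid  # becomes the run_id for later log entries

Returning the new row id gives the run_id that the later add_log_* calls use to group all rounds of one session.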
@@ -46,26 +46,30 @@
 while round < max_rounds and not gotRoot:
 
     console.log(f"Starting round {round} of {max_rounds}")
-    answer = llm_gpt.get_next_cmd()
+    with console.status("[bold green]Asking LLM for a new command...") as status:
+        answer = llm_gpt.get_next_cmd()
 
-    if answer.result["type"] == "cmd":
-        cmd, result, gotRoot = handle_cmd(conn, answer.result)
-    elif answer.result["type"] == "ssh":
-        cmd, result = handle_ssh(answer.result)
+    with console.status("[bold green]Executing that command...") as status:
+        if answer.result["type"] == "cmd":
+            cmd, result, gotRoot = handle_cmd(conn, answer.result)
+        elif answer.result["type"] == "ssh":
+            cmd, result = handle_ssh(answer.result)
 
     db.add_log_query(run_id, round, cmd, result, answer)
 
     # output the command and its result
     console.print(Panel(result, title=cmd))
 
-    # analyze the result and update your state
-    answer = llm_gpt.analyze_result(cmd, result)
-    db.add_log_analyze_response(run_id, round, cmd, answer.result["reason"], answer)
+    # analyze the result..
+    with console.status("[bold green]Analyzing its result...") as status:
+        answer = llm_gpt.analyze_result(cmd, result)
+        db.add_log_analyze_response(run_id, round, cmd, answer.result["reason"], answer)
 
-    state = llm_gpt.update_state()
-    console.print(Panel(state.result, title="my new fact list"))
-    db.add_log_update_state(run_id, round, "", state.result, None)
+    # .. and let our local model representation update its state
+    state = llm_gpt.update_state()
+    console.print(Panel(state.result, title="my new fact list"))
+    db.add_log_update_state(run_id, round, "", state.result, None)
 
-    # update our command history and output it
+    # update command history and output it
     console.print(get_history_table(run_id, db, round))
-    round += 1
+    round += 1
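The "animations" from the commit message are rich status spinners: console.status() shows an animated indicator while a slow block runs, then clears it again. A self-contained sketch of the pattern; the sleep() calls are placeholders for the real LLM query and command execution:

# Demo of the rich status-spinner pattern this commit introduces.
import time
from rich.console import Console
from rich.panel import Panel

console = Console()

with console.status("[bold green]Asking LLM for a new command..."):
    time.sleep(1)  # placeholder for llm_gpt.get_next_cmd()

with console.status("[bold green]Executing that command..."):
    time.sleep(1)  # placeholder for handle_cmd() / handle_ssh()

console.print(Panel("uid=0(root) ...", title="sudo id"))

Because status() is a context manager, the spinner disappears automatically when the block exits, so only the Panel output remains in the transcript while the long-running LLM calls still show visible progress.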