 #!/usr/bin/python

-import os
-import openai
-
 from dotenv import load_dotenv
-from mako.template import Template

 from history import ResultHistory
-from ssh import SSHHostConn
-from logwriter import LogHelper
+from targets.ssh import get_ssh_connection
+from llms.openai import openai_config
+from prompt_helper import create_and_ask_prompt

-logs = LogHelper()
+# setup some infrastructure
+cmd_history = ResultHistory()

+# read configuration from env and configure system parts
 load_dotenv()
-
-openai.api_key = os.getenv('OPENAI_KEY')
-model = os.getenv('MODEL')
-
-conn = SSHHostConn(os.getenv('TARGET_IP'), os.getenv('TARGET_USER'), os.getenv('TARGET_PASSWORD'))
+openai_config()
+conn = get_ssh_connection()
 conn.connect()

+print("Get initial user from virtual machine:")
 initial_user = conn.run("whoami")

-def get_openai_response(cmd):
-    completion = openai.ChatCompletion.create(model=model, messages=[{"role": "user", "content": cmd}])
-    result = completion.choices[0].message.content
-    return result
-
-cmd_history = ResultHistory()
-
-mytemplate = Template(filename='templates/gpt_query.txt')
-whytemplate = Template(filename='templates/why.txt')
-furthertemplate = Template(filename='templates/further_information.txt')
-
 while True:

-    cmd = mytemplate.render(user=initial_user, history=cmd_history.dump())
-    logs.warning("openai-prompt", cmd)
-
-    print("now thinking..")
-    next_cmd = get_openai_response(cmd)
-    logs.warning("openai-next-command", next_cmd)
+    next_cmd = create_and_ask_prompt('gpt_query.txt', "next-cmd", user=initial_user, history=cmd_history.dump())

-    if False:
-        # disable this for now, it's tragic because the AI won't tell me why it had chosen something
-        print("now thinking why did I choose this? can we put both questions into a single prompt?")
-        why = whytemplate.render(user=initial_user, history=cmd_history.dump(), next_cmd=next_cmd)
-        why_response = get_openai_response(why)
-        logs.warning("why", why_response)
+    # disable this for now, it's tragic because the AI won't tell me why it had chosen something
+    # create_and_ask_prompt("why.txt", "why", user=initial_user, history=cmd_history.dump(), next_cmd=next_cmd)

-
-    print("running the command..")
     resp = conn.run(next_cmd)
-    logs.warning("server-output", resp)
+    cmd_history.append(next_cmd, resp)

-    print("now thinking about more exploits")
-    vulns = furthertemplate.render(user=initial_user, next_cmd=next_cmd, resp=resp)
-    print(vulns)
-    vulns_resp = get_openai_response(vulns)
-    logs.warning("vulns", vulns_resp)
+    # this will already be output by conn.run
+    # logs.warning("server-output", resp)

-    cmd_history.append(next_cmd, resp)
+    # this asks for additional vulnerabilities identifiable in the last command output
+    # create_and_ask_prompt('further_information.txt', 'vulns', user=initial_user, next_cmd=next_cmd, resp=resp)
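
The helper modules introduced above (targets/ssh.py, llms/openai.py, prompt_helper.py) are not shown in this commit. For orientation, here is a minimal sketch of what targets/ssh.py could look like, assuming SSHHostConn moved there from the old top-level ssh.py and get_ssh_connection() reads the same three environment variables as the deleted inline code:

# targets/ssh.py -- hypothetical sketch, not part of this commit
import os

# assumption: the SSHHostConn class from the old ssh.py now lives in
# this module; its definition is omitted here

def get_ssh_connection():
    # build the (not yet connected) SSH wrapper from the same
    # environment variables the removed inline code used
    return SSHHostConn(os.getenv('TARGET_IP'),
                       os.getenv('TARGET_USER'),
                       os.getenv('TARGET_PASSWORD'))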
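
Likewise, openai_config() presumably replaces the deleted API-key and model setup. A sketch of llms/openai.py under that assumption, carrying over the removed get_openai_response() helper unchanged:

# llms/openai.py -- hypothetical sketch, not part of this commit
import os

import openai

model = None

def openai_config():
    # read credentials and model name from the environment,
    # exactly as the deleted inline code did
    global model
    openai.api_key = os.getenv('OPENAI_KEY')
    model = os.getenv('MODEL')

def get_openai_response(cmd):
    # the helper deleted from the main script, unchanged
    completion = openai.ChatCompletion.create(model=model, messages=[{"role": "user", "content": cmd}])
    return completion.choices[0].message.content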
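
Finally, create_and_ask_prompt() folds the render/log/ask/log sequence that the old loop body spelled out into a single call. A sketch of prompt_helper.py on that assumption; the signature follows the call sites above, while the templates/ prefix and LogHelper usage are carried over from the deleted code, and the exact log tags are a guess:

# prompt_helper.py -- hypothetical sketch, not part of this commit
from mako.template import Template

from llms.openai import get_openai_response
from logwriter import LogHelper

logs = LogHelper()

def create_and_ask_prompt(template_file, log_name, **params):
    # render the Mako template from templates/ with the supplied parameters
    prompt = Template(filename='templates/' + template_file).render(**params)
    logs.warning("openai-prompt", prompt)

    # ask the model and log its answer under the caller-supplied name
    answer = get_openai_response(prompt)
    logs.warning(log_name, answer)
    return answer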