
Commit 51dc4e7

cleanup code-base a bit
1 parent 0672ff2 commit 51dc4e7

6 files changed: +76 −59 lines changed
.gitignore

Lines changed: 1 addition & 0 deletions

@@ -2,3 +2,4 @@
 venv/
 __pycache__/
 *.swp
+*.log

llms/openai.py

Lines changed: 20 additions & 0 deletions

@@ -0,0 +1,20 @@
+import openai
+import os
+
+openapi_model : str = ''
+
+def openai_config():
+    global openapi_model
+
+    api_key = os.getenv('OPENAI_KEY')
+    model = os.getenv('MODEL')
+
+    if api_key != '' and model != '':
+        openai.api_key = api_key
+        openapi_model = model
+    else:
+        raise Exception("please set OPENAI_KEY and MODEL through environment variables!")
+
+def get_openai_response(cmd):
+    completion = openai.ChatCompletion.create(model=openapi_model, messages=[{"role": "user", "content" : cmd}])
+    return completion.choices[0].message.content
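
One caveat worth flagging: os.getenv() returns None, not '', when a variable is unset, so the != '' check above never fires for missing configuration (and openapi_model is presumably a typo for openai_model). A minimal sketch of a stricter guard, keeping the names from the diff:

import os
import openai

api_key = os.getenv('OPENAI_KEY')
model = os.getenv('MODEL')

# getenv returns None for unset variables; a truthiness check
# covers both None and the empty string
if not api_key or not model:
    raise Exception("please set OPENAI_KEY and MODEL through environment variables!")
openai.api_key = api_key
openapi_model = model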

logwriter.py

Lines changed: 0 additions & 15 deletions
This file was deleted.
prompt_helper.py

Lines changed: 26 additions & 0 deletions

@@ -0,0 +1,26 @@
+import logging
+
+from colorama import Fore, Style
+from datetime import datetime
+from mako.template import Template
+
+from llms.openai import get_openai_response
+
+log = logging.getLogger()
+filename = datetime.now().strftime('logs/run_%H_%M_%d_%m_%Y.log')
+log.addHandler(logging.FileHandler(filename))
+
+def output_log(self, kind, msg):
+    print("[" + Fore.RED + kind + Style.RESET_ALL +"]: " + msg)
+    self.log.warning("[" + kind + "] " + msg)
+
+# helper for generating and executing LLM prompts from a template
+def create_and_ask_prompt(template_file, log_prefix, **params):
+    global logs
+
+    template = Template(filename='templates/' + template_file)
+    prompt = template.render(**params)
+    logs.warning(log_prefix + "-prompt", prompt)
+    result = get_openai_response(prompt)
+    logs.warning(log_prefix + "-answer", result)
+    return result
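
As committed, the two helpers look inconsistent: output_log takes a self parameter although it is a module-level function, create_and_ask_prompt references a logs global that this module never defines (it defines log), and Logger.warning does not take a separate kind argument the way these calls use it. A minimal sketch of how the pieces could line up, assuming create_and_ask_prompt is meant to log through output_log:

def output_log(kind, msg):
    # echo to the console in colour and mirror into the log file
    print("[" + Fore.RED + kind + Style.RESET_ALL + "]: " + msg)
    log.warning("[" + kind + "] " + msg)

def create_and_ask_prompt(template_file, log_prefix, **params):
    template = Template(filename='templates/' + template_file)
    prompt = template.render(**params)
    output_log(log_prefix + "-prompt", prompt)
    result = get_openai_response(prompt)
    output_log(log_prefix + "-answer", result)
    return result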

ssh.py renamed to targets/ssh.py

Lines changed: 12 additions & 0 deletions

@@ -1,6 +1,18 @@
+import os
+
 from fabric import Connection
 from invoke import Responder
 
+def get_ssh_connection():
+    ip = os.getenv('TARGET_IP')
+    user = os.getenv('TARGET_USER')
+    password = os.getenv('TARGET_PASSWORD')
+
+    if ip != '' and user != '' and password != '':
+        return SSHHostConn(ip, user, password)
+    else:
+        raise Exception("Please configure SSH through environment variables (TARGET_IP, TARGET_USER, TARGET_PASSWORD)")
+
 class SSHHostConn:
 
     def __init__(self, host, username, password):
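
The same os.getenv() caveat applies here: unset variables come back as None rather than '', so the != '' checks pass even with no configuration at all. A hedged sketch of a stricter guard, keeping the diff's names:

def get_ssh_connection():
    ip = os.getenv('TARGET_IP')
    user = os.getenv('TARGET_USER')
    password = os.getenv('TARGET_PASSWORD')

    # truthiness catches both None (unset) and empty strings
    if ip and user and password:
        return SSHHostConn(ip, user, password)
    raise Exception("Please configure SSH through environment variables (TARGET_IP, TARGET_USER, TARGET_PASSWORD)")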

wintermute.py

Lines changed: 17 additions & 44 deletions

@@ -1,63 +1,36 @@
 #!/usr/bin/python
 
-import os
-import openai
-
 from dotenv import load_dotenv
-from mako.template import Template
 
 from history import ResultHistory
-from ssh import SSHHostConn
-from logwriter import LogHelper
+from targets.ssh import get_ssh_connection
+from llms.openai import openai_config
+from prompt_helper import create_and_ask_prompt
 
-logs = LogHelper()
+# setup some infrastructure
+cmd_history = ResultHistory()
 
+# read configuration from env and configure system parts
 load_dotenv()
-
-openai.api_key = os.getenv('OPENAI_KEY')
-model = os.getenv('MODEL')
-
-conn = SSHHostConn(os.getenv('TARGET_IP'), os.getenv('TARGET_USER'), os.getenv('TARGET_PASSWORD'))
+openai_config()
+conn = get_ssh_connection()
 conn.connect()
 
+print("Get initial user from virtual machine:")
 initial_user = conn.run("whoami")
 
-def get_openai_response(cmd):
-    completion = openai.ChatCompletion.create(model=model, messages=[{"role": "user", "content" : cmd}])
-    result = completion.choices[0].message.content
-    return result
-
-cmd_history = ResultHistory()
-
-mytemplate = Template(filename='templates/gpt_query.txt')
-whytemplate = Template(filename='templates/why.txt')
-furthertemplate = Template(filename='templates/further_information.txt')
-
 while True:
 
-    cmd = mytemplate.render(user=initial_user, history=cmd_history.dump())
-    logs.warning("openai-prompt", cmd)
-
-    print("now thinking..")
-    next_cmd = get_openai_response(cmd)
-    logs.warning("openai-next-command", next_cmd)
+    next_cmd = create_and_ask_prompt('gpt_query.txt', "next-cmd", user=initial_user, history=cmd_history.dump())
 
-    if False:
-        # disable this for now, it's tragic because the AI won't tell me why it had chosen something
-        print("now thinking why did I choose this? can we put both questions into a single prompt?")
-        why = whytemplate.render(user=initial_user, history=cmd_history.dump(), next_cmd=next_cmd)
-        why_response = get_openai_response(why)
-        logs.warning("why", why_response)
+    # disable this for now, it's tragic because the AI won't tell me why it had chosen something
+    # create_and_ask_prompt("why.txt", "why", user=initial_user, history=cmd_history.dump(), next_cmd=next_cmd)
 
-
-    print("running the command..")
     resp = conn.run(next_cmd)
-    logs.warning("server-output", resp)
+    cmd_history.append(next_cmd, resp)
 
-    print("now thinking about more exploits")
-    vulns = furthertemplate.render(user=initial_user, next_cmd=next_cmd, resp=resp)
-    print(vulns)
-    vulns_resp = get_openai_response(vulns)
-    logs.warning("vulns", vulns_resp)
+    # this will already by output by conn.run
+    # logs.warning("server-output", resp)
 
-    cmd_history.append(next_cmd, resp)
+    # this asks for additional vulnerabilities identifiable in the last command output
+    # create_and_ask_prompt('further_information.txt', 'vulns', user=initial_user, next_cmd=next_cmd, resp=resp)
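
Taken together, the cleanup reduces the agent to a compact feedback loop: render a prompt from a template plus the command history, ask the model for the next shell command, run it over SSH, and append the outcome to the history for the next round. A condensed sketch of that cycle, using only names that appear in the diff:

while True:
    # ask the LLM for the next command, given who we are and what happened so far
    next_cmd = create_and_ask_prompt('gpt_query.txt', "next-cmd",
                                     user=initial_user,
                                     history=cmd_history.dump())

    # execute the suggested command on the target over SSH
    resp = conn.run(next_cmd)

    # feed command and output back into the history for the next prompt
    cmd_history.append(next_cmd, resp)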
