|
1 | | -import argparse |
2 | 1 | import unittest |
3 | | -from hackingBuddyGPT.usecases.base import use_cases |
4 | | -from hackingBuddyGPT.usecases.web_api_testing.simple_web_api_testing import SimpleWebAPITestingUseCase |
5 | | -from hackingBuddyGPT.utils import DbStorage, Console |
| 2 | +from unittest.mock import MagicMock, patch |
6 | 3 |
|
| 4 | +from hackingBuddyGPT.usecases import SimpleWebAPITesting |
| 5 | +from hackingBuddyGPT.usecases.web import MinimalWebTesting |
| 6 | +from hackingBuddyGPT.usecases.web_api_testing.simple_openapi_documentation import SimpleWebAPIDocumentationUseCase |
| 7 | +from hackingBuddyGPT.utils import DbStorage, Console |
| 8 | +from hackingBuddyGPT.utils.openai.openai_lib import OpenAILib |
7 | 9 |
|
8 | 10 |
|
9 | | -class WebAPIDocumentationTestCase(unittest.TestCase): |
10 | | - def test_simple_web_api_testing(self): |
| 11 | +class TestSimpleWebAPITesting(unittest.TestCase): |
11 | 12 |
|
| 13 | + @patch('hackingBuddyGPT.utils.openai.openai_lib.OpenAILib') |
| 14 | + def setUp(self, MockOpenAILib): |
| 15 | + # Mock the OpenAILib instance |
| 16 | + self.mock_llm = MockOpenAILib.return_value |
12 | 17 | log_db = DbStorage(':memory:') |
13 | 18 | console = Console() |
14 | 19 |
|
15 | 20 | log_db.init() |
16 | | - parser = argparse.ArgumentParser() |
17 | | - subparser = parser.add_subparsers(required=True) |
18 | | - for name, use_case in use_cases.items(): |
19 | | - use_case.build_parser(subparser.add_parser( |
20 | | - name=use_case.name, |
21 | | - help=use_case.description |
22 | | - )) |
23 | | - |
24 | | - parsed = parser.parse_args(["SimpleWebAPIDocumentation"]) |
25 | | - instance = parsed.use_case(parsed) |
26 | | - |
27 | | - agent = instance.agent |
28 | | - simple_web_api_documentation = SimpleWebAPITestingUseCase( |
29 | | - agent=agent, |
| 21 | + self.agent = SimpleWebAPITesting(llm=self.mock_llm) |
| 22 | + self.agent.init() |
| 23 | + self.simple_api_testing = SimpleWebAPIDocumentationUseCase( |
| 24 | + agent=self.agent, |
30 | 25 | log_db=log_db, |
31 | 26 | console=console, |
32 | | - tag='web_api_documentation', |
33 | | - max_turns=20 |
| 27 | + tag='integration_test_linuxprivesc', |
| 28 | + max_turns=len(self.mock_llm.responses) |
34 | 29 | ) |
| 30 | + self.simple_api_testing.init() |
| 31 | + |
| 32 | + def test_initial_prompt(self): |
| 33 | + # Test if the initial prompt is set correctly |
| 34 | + self.assertIn("You're a seasoned API tester tasked with evaluating the REST APIs of a website hosted at https://jsonplaceholder.typicode.com. Your primary objective is to thoroughly explore the APIs, understanding their endpoints, parameters, and responses, and then devise and execute potential exploits. Be diligent in documenting your findings as you navigate through the APIs. Avoid resorting to brute-force tactics. All necessary information can be obtained from the API endpoints themselves. Remember, if you encounter an HTTP method (A string that represents an HTTP method (e.g., 'GET', 'POST', etc.).), promptly submit it as it is of utmost importance.", self.agent._prompt_history[0]['content']) |
| 35 | + |
| 36 | + def test_all_flags_found(self): |
| 37 | + # Mock console.print to suppress output during testing |
| 38 | + with patch('rich.console.Console.print'): |
| 39 | + self.agent.all_http_methods_found() |
| 40 | + self.assertFalse(self.agent.all_http_methods_found()) |
| 41 | + |
| 42 | + @patch('time.perf_counter', side_effect=[1, 2]) # Mocking perf_counter for consistent timing |
| 43 | + def test_perform_round(self, mock_perf_counter): |
| 44 | + # Prepare mock responses |
| 45 | + mock_response = MagicMock() |
| 46 | + mock_completion = MagicMock() |
| 47 | + |
| 48 | + # Setup completion response with mocked data |
| 49 | + mock_completion.choices[0].message.content = "Mocked LLM response" |
| 50 | + mock_completion.choices[0].message.tool_calls = [MagicMock(id="tool_call_1")] |
| 51 | + mock_completion.usage.prompt_tokens = 10 |
| 52 | + mock_completion.usage.completion_tokens = 20 |
| 53 | + |
| 54 | + # Mock the OpenAI LLM response |
| 55 | + self.agent.llm.instructor.chat.completions.create_with_completion.return_value = ( |
| 56 | + mock_response, mock_completion) |
| 57 | + |
| 58 | + # Mock the tool execution result |
| 59 | + mock_response.execute.return_value = "Mocked tool execution result" |
| 60 | + |
| 61 | + # Perform the round |
| 62 | + result = self.agent.perform_round(1) |
| 63 | + |
| 64 | + # Assertions |
| 65 | + self.assertFalse(result) # No flags found in this round |
| 66 | + |
| 67 | + # Check if the LLM was called with the correct parameters |
| 68 | + mock_create_with_completion = self.agent.llm.instructor.chat.completions.create_with_completion |
35 | 69 |
|
36 | | - simple_web_api_documentation.init() |
37 | | - result = simple_web_api_documentation.run() |
38 | | - print(f'result: {result}') |
39 | | - assert result is True |
| 70 | + # if it can be called multiple times, use assert_called |
| 71 | + self.assertEqual( 2, mock_create_with_completion.call_count) |
40 | 72 |
|
| 73 | + # Check if the prompt history was updated correctly |
| 74 | + self.assertEqual(5, len(self.agent._prompt_history)) # Initial message + LLM response + tool message |
41 | 75 |
|
42 | 76 | if __name__ == '__main__': |
43 | 77 | unittest.main() |
0 commit comments