
Commit efee6d7

Update chat_models_gguf_fast_start.py
1. Error Handling: Add error handling for model loading and prompting so the program doesn't crash unexpectedly.
2. Logging: Replace print statements with the logging module for better control over logging levels and outputs (a minimal sketch of this pattern follows this list).
3. Type Annotations: Add type annotations to the function parameters for better clarity and type checking.
4. Docstrings: Include docstrings for the functions to describe their purpose and parameters.
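A minimal, illustrative sketch of the logging pattern described in point 2 (not part of this commit): configure logging once at the script's entry point and route messages through a named logger instead of print statements. The function and model name below are placeholders, not code from the example file.

```python
import logging

# Module-level named logger; inherits the configuration set at the entry point.
logger = logging.getLogger(__name__)


def load_and_report(model_name: str) -> None:
    """Log which model is being loaded, instead of printing it."""
    logger.info("Loading model '%s'", model_name)


if __name__ == "__main__":
    # Configure logging once at the entry point; INFO level makes the
    # logger.info() calls above visible on the console.
    logging.basicConfig(level=logging.INFO,
                        format="%(levelname)s %(name)s: %(message)s")
    load_and_report("example-model-name")  # placeholder model name
```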
1 parent 98f166f commit efee6d7

File tree

1 file changed (+28, −12 lines)


examples/Models/chat_models_gguf_fast_start.py

Lines changed: 28 additions & 12 deletions
@@ -1,31 +1,47 @@
-
 """This example demonstrates several leading open source chat models running in 4-bit GGUF on local laptop."""
 
 import time
 import re
 from llmware.prompts import Prompt
+import logging
 
 
 # Run the benchmark test
-def run_test(model_name, prompt_list):
-
-    print(f"\n > Loading model '{model_name}'")
-
-    prompter = Prompt().load_model(model_name)
+def run_test(model_name: str, prompt_list: list[dict]) -> int:
+    """Run the benchmark test on the specified model with the given prompts.
+
+    Args:
+        model_name (str): The name of the model to load.
+        prompt_list (list[dict]): A list of prompts to test the model with.
+
+    Returns:
+        int: Status code (0 for success).
+    """
+    logging.basicConfig(level=logging.INFO)
+
+    logging.info(f"Loading model '{model_name}'")
+
+    try:
+        prompter = Prompt().load_model(model_name)
+    except Exception as e:
+        logging.error(f"Failed to load model: {e}")
+        return 1
 
     for i, entry in enumerate(prompt_list):
-
         start_time = time.time()
-        print("\n")
-        print(f"query - {i+1} - {entry['query']}")
+        logging.info(f"query - {i+1} - {entry['query']}")
 
-        response = prompter.prompt_main(entry["query"])
+        try:
+            response = prompter.prompt_main(entry["query"])
+        except Exception as e:
+            logging.error(f"Error during prompting: {e}")
+            continue
 
         # Print results
         time_taken = round(time.time() - start_time, 2)
         llm_response = re.sub("[\n\n]", "\n", response['llm_response'])
-        print(f"llm_response - {i+1} - {llm_response}")
-        print(f"time_taken - {i+1} - {time_taken}")
+        logging.info(f"llm_response - {i+1} - {llm_response}")
+        logging.info(f"time_taken - {i+1} - {time_taken}")
 
     return 0
 
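For context, a hedged sketch of how the updated run_test() might be invoked; the model name and prompt list below are illustrative placeholders and are not taken from the rest of the example file.

```python
if __name__ == "__main__":

    # Each entry only needs a "query" key, since run_test() reads entry["query"].
    sample_prompts = [
        {"query": "What are the benefits of running a 4-bit GGUF chat model locally?"},
        {"query": "Summarize the main trade-offs of 4-bit quantization."},
    ]

    # "my-gguf-chat-model" stands in for any chat model name registered in the
    # llmware model catalog; substitute a real catalog name to run this.
    run_test("my-gguf-chat-model", sample_prompts)
```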
