from typing import Optional, Sequence

import logging
from dataclasses import asdict

from ..config import LLMConfigurationError
from ..errors import LLMImportError
from . import LLMClient
from .base import ChatMessage

try:
    import groq
    from groq import Groq
except ImportError as err:
    raise LLMImportError(flavor="llm") from err

AUTH_ERROR_MESSAGE = (
    "Could not authenticate with the Groq API. Please make sure you have configured the API key "
    "by setting GROQ_API_KEY in the environment."
)

JSON_MODE_GUIDANCE = (
    "To use JSON mode, make sure that:\n"
    "1. You pass format='json' or format='json_object' to the `complete()` method.\n"
    "2. You describe the expected JSON structure clearly in the system prompt.\n"
    "3. The selected model supports JSON output.\n"
    "See: https://console.groq.com/docs/text-chat#json-mode"
)

logger = logging.getLogger(__name__)


class GroqClient(LLMClient):
    def __init__(
        self,
        model: str = "llama-3.3-70b-versatile",  # Default model for Groq
        client: Optional[Groq] = None,
    ):
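        """Create a Groq client.

        If no `client` is given, a new `groq.Groq()` is created, which reads
        GROQ_API_KEY from the environment.
        """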
        logger.info(f"Initializing GroqClient with model: {model}")
        self.model = model
        self._client = client or Groq()
        logger.info("GroqClient initialized successfully")

    def get_config(self) -> dict:
        """Return the configuration of the LLM client."""
        return {"client_type": self.__class__.__name__, "model": self.model}

    def complete(
        self,
        messages: Sequence[ChatMessage],
        temperature: float = 1.0,
        max_tokens: Optional[int] = None,
        caller_id: Optional[str] = None,
        seed: Optional[int] = None,
        format: Optional[str] = None,
    ) -> ChatMessage:
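        """Send the chat `messages` to the model and return its reply.

        Set `format` to "json" or "json_object" to request JSON output; the
        seed, when given, is forwarded to the API for more reproducible
        sampling. Raises `LLMConfigurationError` on authentication failure or
        when JSON mode is requested but rejected by the model.
        """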
        logger.debug(f"GroqClient.complete called with model: {self.model}")
        logger.debug(f"Messages: {messages}")

        extra_params = dict()

        # Only forward the seed when one was provided.
        if seed is not None:
            extra_params["seed"] = seed

        if format in {"json", "json_object"}:
            extra_params["response_format"] = {"type": "json_object"}

        try:
            completion = self._client.chat.completions.create(
                model=self.model,
                messages=[asdict(m) for m in messages],
                temperature=temperature,
                max_tokens=max_tokens,
                **extra_params,
            )

        except groq.AuthenticationError as err:
            raise LLMConfigurationError(AUTH_ERROR_MESSAGE) from err

        except groq.BadRequestError as err:
            # A bad request in JSON mode most likely means the model rejected
            # the response_format, so surface the JSON-mode guidance.
            if format in {"json", "json_object"}:
                raise LLMConfigurationError(
                    f"Model '{self.model}' does not support JSON output or the request format is incorrect.\n\n{JSON_MODE_GUIDANCE}"
                ) from err
            raise

        # Record token usage on the shared LLM logger provided by the base class.
        self.logger.log_call(
            prompt_tokens=completion.usage.prompt_tokens,
            sampled_tokens=completion.usage.completion_tokens,
            model=self.model,
            client_class=self.__class__.__name__,
            caller_id=caller_id,
        )

        msg = completion.choices[0].message

        return ChatMessage(role=msg.role, content=msg.content)
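

# Example usage (a minimal sketch, not part of the client API): this assumes
# GROQ_API_KEY is set in the environment and that ChatMessage takes `role` and
# `content` fields, as used above.
#
#     client = GroqClient()
#     reply = client.complete(
#         messages=[ChatMessage(role="user", content='Return {"ok": true} as JSON.')],
#         temperature=0.0,
#         format="json",
#     )
#     print(reply.content)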