reintroduce quick hack so that we can test non-OpenAI LLMs easily
andreashappe committed Apr 24, 2024
commit 66e8e10eb35f9f9d8fb6c0ec565200d13df665bf
9 changes: 7 additions & 2 deletions utils/openai/openai_llm.py
@@ -9,7 +9,6 @@
 from utils.configurable import configurable, parameter
 from utils.llm_util import LLMResult, LLM
 
-
 @configurable("openai-compatible-llm-api", "OpenAI-compatible LLM API")
 @dataclass
 class OpenAIConnection(LLM):
@@ -69,7 +68,13 @@ def get_response(self, prompt, *, retry: int = 0, **kwargs) -> LLMResult:
         return LLMResult(result, prompt, result, toc - tic, tok_query, tok_res)
 
     def encode(self, query) -> list[int]:
-        return tiktoken.encoding_for_model(self.model).encode(query)
+        # I know this is crappy for all non-openAI models but sadly this
+        # has to be good enough for now
+        if self.model.startswith("gpt-"):
+            encoding = tiktoken.encoding_for_model(self.model)
+        else:
+            encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
+        return encoding.encode(query)
 
 
 @configurable("openai/gpt-3.5-turbo", "OpenAI GPT-3.5 Turbo")
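For context, a minimal standalone sketch of the fallback behaviour this commit introduces. It assumes a recent tiktoken release; the helper name encoding_for and the model name "llama3" are hypothetical illustrations, not part of the repository:

import tiktoken

def encoding_for(model: str) -> tiktoken.Encoding:
    # tiktoken only knows OpenAI model names; encoding_for_model raises
    # KeyError for anything else, so fall back to the gpt-3.5-turbo
    # encoding as a rough approximation for non-OpenAI models
    if model.startswith("gpt-"):
        return tiktoken.encoding_for_model(model)
    return tiktoken.encoding_for_model("gpt-3.5-turbo")

print(len(encoding_for("gpt-4").encode("hello world")))   # exact token count
print(len(encoding_for("llama3").encode("hello world")))  # approximation only

Token counts for non-GPT models are therefore only approximate, which is presumably good enough for the tok_query/tok_res accounting that LLMResult carries.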