import os

from dotenv import load_dotenv
from groq import AsyncGroq
from openai import AsyncOpenAI

load_dotenv()

# API keys are read from the environment (e.g. a .env file)
openai_client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Groq client used for audio transcription
client = AsyncGroq(
    api_key=os.getenv("GROQ_API_KEY"),
)


async def chat_handler(query: str) -> str:
    """Send a user query to an OpenAI chat model and return the reply text."""
    response = await openai_client.chat.completions.create(
        model="gpt-4",  # or another model you want to use
        messages=[
            {"role": "user", "content": query}
        ],
    )
    return response.choices[0].message.content


async def transcribe_audio(file_content: bytes, file_name: str = "audio.m4a") -> str:
28+ """
29+ Transcribe the given audio file content using Groq API.
30+
31+ :param file_content: The audio file content as bytes.
32+ :param file_name: The name of the file, default is "audio.m4a".
33+ :return: The transcription text.
34+ """
35+ try :
36+ # Create a transcription of the audio file
37+ transcription = await client .audio .transcriptions .create (
38+ file = (file_name , file_content ),
39+ model = "distil-whisper-large-v3-en" ,
40+ prompt = "Specify context or spelling" ,
41+ response_format = "json" ,
42+ language = "en" ,
43+ temperature = 0.0
44+ )
45+
46+ return transcription .text
47+
48+ except Exception as e :
49+ raise RuntimeError (f"Error during transcription: { str (e )} " )
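

# --- Illustrative usage sketch (an assumption, not part of the original module) ---
# Shows one way these coroutines might be wired together: transcribe a local
# audio file with transcribe_audio, then pass the transcript to chat_handler.
# The file path "sample.m4a" and the summarization prompt are hypothetical.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        # Read the audio bytes, transcribe them, then ask the chat model about the result.
        with open("sample.m4a", "rb") as f:
            transcript = await transcribe_audio(f.read(), file_name="sample.m4a")
        reply = await chat_handler(f"Summarize this transcript:\n{transcript}")
        print(reply)

    asyncio.run(_demo())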