
Commit b3b1969

Initialized a basic FastAPI server for handling backend requests

7 files changed: 157 additions & 0 deletions

server/config/llm.py

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
import os
from langchain_groq import ChatGroq
from groq import AsyncGroq

# Async Groq client for direct Groq API calls
client = AsyncGroq(
    api_key=os.getenv("GROQ_API_KEY"),
)


# LangChain chat wrapper used for text generation
llm = ChatGroq(
    groq_api_key=os.getenv('GROQ_API_KEY'),
    model="llama-3.1-70b-versatile",
    temperature=0,
)

# Note: distil-whisper-large-v3-en is a speech-to-text model, while ChatGroq wraps
# chat models; transcription elsewhere in this commit goes through the AsyncGroq client.
audiomodel = ChatGroq(
    groq_api_key=os.getenv('GROQ_API_KEY'),
    model="distil-whisper-large-v3-en",
)
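
A minimal sketch of invoking the LangChain wrapper defined above (assumes GROQ_API_KEY is set in the environment and the snippet runs from the server/ directory):

from config.llm import llm

# Invoke the chat model once and print the reply text
reply = llm.invoke("Say hello in one short sentence.")
print(reply.content)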

server/main.py

Lines changed: 14 additions & 0 deletions
@@ -0,0 +1,14 @@
from fastapi import FastAPI
from routers import chat, search
import os
from dotenv import load_dotenv

load_dotenv()

app = FastAPI()

app.include_router(chat.router)
app.include_router(search.router)
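
To run the app locally, a minimal sketch (assumes uvicorn is installed; the file name run.py and port 8000 are arbitrary choices, not part of the commit):

# run.py - hypothetical helper placed next to main.py in server/
import uvicorn

if __name__ == "__main__":
    uvicorn.run("main:app", host="127.0.0.1", port=8000, reload=True)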

server/models/schemas.py

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
from pydantic import BaseModel


class Query(BaseModel):
    query: str

server/routers/chat.py

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
from fastapi import APIRouter, HTTPException
from utils.chat import chat_handler
from models.schemas import Query

router = APIRouter()


@router.post("/chat")
async def chat(query: Query):
    try:
        response = await chat_handler(query.query)
        return {"response": response}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/complete/")
async def autocomplete(input: str):
    try:
        # Autocomplete logic not implemented yet; endpoint currently returns None
        pass
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
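
A quick way to exercise the /chat route, sketched with FastAPI's TestClient (assumes the server/ directory is on the path and the relevant API keys are configured):

from fastapi.testclient import TestClient
from main import app

client = TestClient(app)

# POST the query in the JSON body, matching the Query schema
resp = client.post("/chat", json={"query": "Hello there!"})
print(resp.status_code, resp.json())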

server/routers/search.py

Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
from fastapi import APIRouter, HTTPException
from models.schemas import Query
from typing import Optional
from utils.search import search_handler

router = APIRouter()


@router.post("/search")
async def search(query: Query, search_type: Optional[str] = "text"):
    try:
        response = await search_handler(query.query, search_type)
        return {"response": response}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/autocomplete")
async def recommendation(input: str):
    try:
        # response = await auto_recommendation(query.query, search_type)
        return {"response": "fr"}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
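
For /search, the search text travels in the JSON body while search_type is a query parameter; a sketch of a call selecting news results, under the same assumptions as the /chat example above:

from fastapi.testclient import TestClient
from main import app

client = TestClient(app)

# search_type goes in the query string, the search text in the body
resp = client.post("/search", params={"search_type": "news"}, json={"query": "FastAPI"})
print(resp.json())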

server/utils/chat.py

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
import openai
import os
from dotenv import load_dotenv

load_dotenv()

# OpenAI API key setup (read from the environment rather than hardcoding)
openai.api_key = os.getenv("OPENAI_API_KEY")

from groq import AsyncGroq

client = AsyncGroq(
    api_key=os.getenv('GROQ_API_KEY'),
)


async def chat_handler(query: str):
    # acreate is the awaitable variant of ChatCompletion.create in openai<1.0
    response = await openai.ChatCompletion.acreate(
        model="gpt-4",  # or another model you want to use
        messages=[
            {"role": "user", "content": query}
        ]
    )
    return response.choices[0].message['content']


async def transcribe_audio(file_content: bytes, file_name: str = "audio.m4a") -> str:
    """
    Transcribe the given audio file content using the Groq API.

    :param file_content: The audio file content as bytes.
    :param file_name: The name of the file, default is "audio.m4a".
    :return: The transcription text.
    """
    try:
        # Create a transcription of the audio file
        transcription = await client.audio.transcriptions.create(
            file=(file_name, file_content),
            model="distil-whisper-large-v3-en",
            prompt="Specify context or spelling",
            response_format="json",
            language="en",
            temperature=0.0
        )

        return transcription.text

    except Exception as e:
        raise RuntimeError(f"Error during transcription: {str(e)}")
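
The module also instantiates an AsyncGroq client that only the transcription helper uses; a sketch of a chat handler built on that client instead of the OpenAI SDK, reusing the llama-3.1-70b-versatile model from config/llm.py (the name groq_chat_handler is hypothetical):

async def groq_chat_handler(query: str) -> str:
    # Alternative to chat_handler that calls Groq's chat completions endpoint
    completion = await client.chat.completions.create(
        model="llama-3.1-70b-versatile",
        messages=[{"role": "user", "content": query}],
        temperature=0,
    )
    return completion.choices[0].message.content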

server/utils/search.py

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
import httpx
from duckduckgo_search import DDGS

# DuckDuckGo search client initialization
ddgs = DDGS()


async def search_handler(query: str, search_type: str):
    if search_type == "text":
        results = list(ddgs.text(query))
        return results
    elif search_type == "images":
        results = list(ddgs.images(query))
        return results
    elif search_type == "news":
        results = list(ddgs.news(query))
        return results
    elif search_type == "maps":
        results = list(ddgs.maps(query))
        return results
    else:
        raise ValueError("Invalid search type")
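
The DDGS calls are synchronous, so they block the event loop inside this async handler; one option, sketched below, is to offload them to a worker thread (asyncio.to_thread requires Python 3.9+; the helper name is hypothetical):

import asyncio
from duckduckgo_search import DDGS


async def ddg_text_search(query: str):
    # Run the blocking DuckDuckGo text search in a worker thread
    return await asyncio.to_thread(lambda: list(DDGS().text(query)))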

0 commit comments
