import os
from functools import lru_cache

from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic import BaseModel
import uvicorn
| |
|
# Load environment variables (e.g. GOOGLE_API_KEY) from a local .env file.
load_dotenv()

app = FastAPI()

# CORS setup.  NOTE: Starlette's CORSMiddleware compares allow_origins
# entries as exact strings — the previous "chrome-extension://*" wildcard
# never matched a real extension origin.  Wildcard schemes must go through
# allow_origin_regex instead.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],
    allow_origin_regex=r"chrome-extension://.*",
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# API key for Google Generative AI; None when absent from the environment.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
| |
|
| | |
class MeaningRequest(BaseModel):
    """Request body for POST /get_meaning."""

    # The raw text whose meaning should be explained.
    text: str
| |
|
| | |
class MeaningResponse(BaseModel):
    """Response body for POST /get_meaning."""

    # Short (one-to-two line) plain-language explanation produced by the LLM.
    meaning: str
| |
|
@lru_cache(maxsize=1)
def _build_llm() -> "ChatGoogleGenerativeAI":
    """Create and cache the Gemini chat client.

    The client configuration is static, so building a new
    ChatGoogleGenerativeAI on every request (as the original code did)
    is pure overhead; one shared instance is reused for all calls.

    Raises:
        RuntimeError: if GOOGLE_API_KEY is not configured, instead of
            failing later with an opaque provider error.
    """
    if not GOOGLE_API_KEY:
        raise RuntimeError("GOOGLE_API_KEY is not set; check your .env file.")
    return ChatGoogleGenerativeAI(
        model="gemini-1.5-flash",
        temperature=0.1,
        max_tokens=None,
        timeout=None,
        max_retries=2,
        google_api_key=GOOGLE_API_KEY,
    )


def get_meaning_from_llm(text: str) -> str:
    """Return a one-to-two-line plain-language explanation of *text*.

    Args:
        text: The text to be explained.

    Returns:
        The LLM's explanation as a plain string.

    Raises:
        RuntimeError: if the API key is missing (see _build_llm).
    """
    # Concatenated parts produce the exact same prompt string as before.
    prompt = (
        "Explain the meaning of the following text in simple terms "
        f"in only one or two lines not more than that: '{text}'"
    )
    response = _build_llm().invoke(prompt)
    return response.content
| |
|
@app.post("/get_meaning", response_model=MeaningResponse)
async def get_meaning(request: MeaningRequest):
    """Explain the meaning of the submitted text.

    Returns HTTP 400 for blank input and HTTP 500 when the LLM call fails.
    """
    # Reject empty / whitespace-only input instead of burning an LLM call.
    if not request.text.strip():
        raise HTTPException(status_code=400, detail="Text must not be empty.")
    try:
        print(f"Received text: {request.text}")
        meaning = get_meaning_from_llm(request.text)
        return MeaningResponse(meaning=meaning)
    except HTTPException:
        # Let deliberate HTTP errors pass through untouched; the broad
        # handler below would otherwise re-wrap them as generic 500s.
        raise
    except Exception as e:
        print(f"An error occurred: {e}")
        raise HTTPException(status_code=500, detail=str(e))
| |
|
def _run_dev_server() -> None:
    """Start the uvicorn development server with auto-reload enabled."""
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)


if __name__ == "__main__":
    _run_dev_server()