import os

import httpx
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse

from models.text.together.main import TogetherAPI
from models.text.vercel.main import XaiAPI, GroqAPI, DeepinfraAPI
from models.image.vercel.main import FalAPI
from models.image.together.main import TogetherImageAPI
from models.text.deepinfra.main import OFFDeepInfraAPI
from models.fetch import FetchModel
from auth.key import NimbusAuthKey
from tools.googlesearch.main import search
from tools.fetch import Tools

app = FastAPI()
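
# CORS is wide open. Note that browsers ignore Access-Control-Allow-Credentials
# when the allowed origin is the wildcard "*", so allow_credentials=True has no
# effect for credentialed requests; pin allow_origins to specific hosts instead.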
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.get("/")
async def root():
    return {
        "status": "ok",
        "routes": {
            "/": "GET",
            "/api/v1/generate": "POST",
            "/api/v1/models": "GET",
            "/api/v1/generate-images": "POST",
            "/api/v1/fetch-models": "GET",
            "/api/v1/text/generate": "POST",
            "/api/v1/tools": "GET",
            "/api/v1/tools/google-search": "GET",
            "/api/stream": "POST",
        },
        "models": ["text", "image"],
    }


@app.post("/api/v1/generate")
async def generate(request: Request):
    data = await request.json()
    # .get() returns None for a missing field instead of raising KeyError
    # before the validation below can run.
    messages = data.get('messages')
    model = data.get('model')

    if not messages or not model:
        return {"error": "Invalid request. 'messages' and 'model' are required."}

    try:
        query = {
            'model': model,
            'max_tokens': None,
            'temperature': 0.7,
            'top_p': 0.7,
            'top_k': 50,
            'repetition_penalty': 1,
            'stream_tokens': True,
            'stop': ['<|eot_id|>', '<|eom_id|>'],
            'messages': messages,
            'stream': True,
        }
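
        # Each provider wrapper exposes its catalogue via get_model_list();
        # the first list that contains the requested model handles the stream.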
        together_models = TogetherAPI().get_model_list()
        xai_models = XaiAPI().get_model_list()
        groq_models = GroqAPI().get_model_list()
        deepinfra_models = DeepinfraAPI().get_model_list()

        if model in together_models:
            streamModel = TogetherAPI()
        elif model in xai_models:
            streamModel = XaiAPI()
        elif model in groq_models:
            streamModel = GroqAPI()
        elif model in deepinfra_models:
            streamModel = DeepinfraAPI()
        else:
            return {"error": f"Model '{model}' is not supported."}

        response = streamModel.generate(query)

        return StreamingResponse(response, media_type="text/event-stream")
    except Exception as e:
        return {"error": f"An error occurred: {str(e)}"}
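
# Example request to /api/v1/generate (assuming a local uvicorn server on
# port 8000; "<model-id>" is a placeholder for an ID from /api/v1/models):
#   curl -N -X POST http://localhost:8000/api/v1/generate \
#     -H 'Content-Type: application/json' \
#     -d '{"model": "<model-id>", "messages": [{"role": "user", "content": "Hi"}]}'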


@app.get("/api/v1/models")
async def get_models():
    try:
        models = {
            'text': {
                'together': TogetherAPI().get_model_list(),
                'xai': XaiAPI().get_model_list(),
                'groq': GroqAPI().get_model_list(),
                'deepinfra': DeepinfraAPI().get_model_list(),
                'official_deepinfra': OFFDeepInfraAPI().get_model_list(),
            },
            'image': {
                'fal': FalAPI().get_model_list(),
                'together': TogetherImageAPI().get_model_list(),
            },
        }
        return {"models": models}
    except Exception as e:
        return {"error": f"An error occurred: {str(e)}"}
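
# The IDs returned by /api/v1/models are the values the generate endpoints
# accept in their "model" field, grouped by provider under 'text' and 'image'.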


@app.post('/api/v1/generate-images')
async def generate_images(request: Request):
    data = await request.json()
    # .get() keeps a missing field from raising KeyError before validation.
    prompt = data.get('prompt')
    model = data.get('model')

    if not prompt or not model:
        return {"error": "Invalid request. 'prompt' and 'model' are required."}

    fal_models = FalAPI().get_model_list()
    together_models = TogetherImageAPI().get_model_list()

    if model in fal_models:
        streamModel = FalAPI()
    elif model in together_models:
        streamModel = TogetherImageAPI()
    else:
        return {"error": f"Model '{model}' is not supported."}

    try:
        query = {
            'prompt': prompt,
            'modelId': model,
        }
        response = await streamModel.generate(query)
        return response
    except Exception as e:
        return {"error": f"An error occurred: {str(e)}"}
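
# Example request to /api/v1/generate-images (same local-server assumption;
# "<image-model-id>" is a placeholder for an ID from /api/v1/models):
#   curl -X POST http://localhost:8000/api/v1/generate-images \
#     -H 'Content-Type: application/json' \
#     -d '{"model": "<image-model-id>", "prompt": "a lighthouse at dusk"}'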


@app.get('/api/v1/fetch-models')
async def fetch_models():
    model = FetchModel()
    return model.all_models()
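
# /api/v1/fetch-models exposes FetchModel's aggregated catalogue, presumably
# the same one select_model() resolves against in /api/v1/text/generate below.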


@app.post('/api/v1/text/generate')
async def text_generate(request: Request):
    data = await request.json()
    messages = data.get('messages')
    choice = data.get('model')
    api_key = data.get('api_key')

    # Reject a missing key before trying to resolve it to a user.
    if not api_key:
        return {"error": "API key is required"}

    auth = NimbusAuthKey()
    user = auth.get_user(api_key)
    if not user:
        return {"error": "Invalid API key"}

    if not messages or not choice:
        return {"error": "Invalid request. 'messages' and 'model' are required."}

    model = FetchModel().select_model(choice)
    if not model:
        return {"error": f"Model '{choice}' is not supported."}

    try:
        query = {
            'model': model,
            'max_tokens': None,
            'temperature': 0.7,
            'top_p': 0.7,
            'top_k': 50,
            'repetition_penalty': 1,
            'stream_tokens': True,
            'stop': ['<|eot_id|>', '<|eom_id|>'],
            'messages': messages,
            'stream': True,
        }

        together_models = TogetherAPI().get_model_list()
        xai_models = XaiAPI().get_model_list()
        groq_models = GroqAPI().get_model_list()
        deepinfra_models = DeepinfraAPI().get_model_list()
        official_deepinfra_models = OFFDeepInfraAPI().get_model_list()

        if model in together_models:
            streamModel = TogetherAPI()
        elif model in xai_models:
            streamModel = XaiAPI()
        elif model in groq_models:
            streamModel = GroqAPI()
        elif model in deepinfra_models:
            streamModel = DeepinfraAPI()
        elif model in official_deepinfra_models:
            streamModel = OFFDeepInfraAPI()
        else:
            return {"error": f"Model '{model}' is not supported."}

        response = streamModel.generate(query)

        return StreamingResponse(response, media_type="text/event-stream")
    except Exception as e:
        return {"error": f"An error occurred: {str(e)}"}
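
# Example request to /api/v1/text/generate ("<your-key>" is a placeholder for
# any key that NimbusAuthKey.get_user accepts):
#   curl -N -X POST http://localhost:8000/api/v1/text/generate \
#     -H 'Content-Type: application/json' \
#     -d '{"model": "<choice>", "api_key": "<your-key>", "messages": [{"role": "user", "content": "Hi"}]}'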


@app.get('/api/v1/tools')
async def tools():
    return Tools.fetch_tools()


@app.get('/api/v1/tools/google-search')
async def searchtool(query: str, num_results: int = 10):
    # GET requests have no reliable JSON body, so the inputs are taken as
    # query parameters rather than via request.json().
    response = search(term=query, num_results=num_results, advanced=True, unique=False)

    return response
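
# Example: GET http://localhost:8000/api/v1/tools/google-search?query=fastapi&num_results=5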


# Headers for the OpenRouter proxy below. The bearer token is read from the
# environment rather than hardcoded; set OPENROUTER_API_KEY before starting.
OPENROUTER_HEADERS = {
    'accept': 'application/json',
    'accept-language': 'en-US,en;q=0.9,ja;q=0.8',
    'authorization': f"Bearer {os.environ.get('OPENROUTER_API_KEY', '')}",
    'content-type': 'application/json',
    'http-referer': 'https://lomni.io',
    'origin': 'https://lomni.io',
    'priority': 'u=1, i',
    'referer': 'https://lomni.io/',
    'sec-ch-ua': '"Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'cross-site',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36',
    'x-stainless-arch': 'unknown',
    'x-stainless-lang': 'js',
    'x-stainless-os': 'Unknown',
    'x-stainless-package-version': '4.86.1',
    'x-stainless-retry-count': '0',
    'x-stainless-runtime': 'browser:chrome',
    'x-stainless-runtime-version': '137.0.0',
    'x-stainless-timeout': '600000',
    'x-title': 'lomni',
}


@app.post('/api/stream')
async def streamres(request: Request):
    body = await request.json()
    messages = body.get('messages', [])
    model = body.get('model', 'anthropic/claude-sonnet-4')

    data = {
        'model': model,
        'messages': messages,
        'max_tokens': 150000,
        'stream': True,
        'transforms': [
            'middle-out',
        ],
    }

    async def proxy_stream():
        # timeout=None keeps the upstream connection open for the length of
        # the stream. aiter_lines() strips newlines, and each non-empty line
        # is one SSE record, so re-add the blank line that terminates an SSE
        # event; a single "\n" would merge every record into one event.
        async with httpx.AsyncClient(timeout=None) as client:
            async with client.stream(
                "POST",
                "https://openrouter.ai/api/v1/chat/completions",
                headers=OPENROUTER_HEADERS,
                json=data,
            ) as response:
                async for line in response.aiter_lines():
                    if line:
                        yield f"{line}\n\n"

    return StreamingResponse(proxy_stream(), media_type='text/event-stream')
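
# Example request to /api/stream (the model falls back to
# anthropic/claude-sonnet-4 when omitted):
#   curl -N -X POST http://localhost:8000/api/stream \
#     -H 'Content-Type: application/json' \
#     -d '{"messages": [{"role": "user", "content": "Hello"}]}'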