# LangServe app exposing OpenAI and Google Gemini chat models behind a reverse proxy
from fastapi import FastAPI
from langserve import add_routes
from langchain.chat_models import ChatOpenAI
from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
from dotenv import load_dotenv
import os

# Load environment variables from a local .env file
load_dotenv()

# Public base URL the app is served under, built from proxy-related env vars
api_url = os.getenv('PROXY_URL') + os.getenv('PROXY_PREFIX')

app = FastAPI(
    title="Dev Assistant LangServe",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
    root_path_in_servers=True,
    root_path=api_url,
    debug=True,
)

@app.get("/")
async def root():
    # Return a JSON health-check response
    return {"message": "Dev Assistant LangServe is up and running!"}

# Expose an OpenAI chat model under /openai
add_routes(
    app=app,
    path="/openai",
    runnable=ChatOpenAI(model="gpt-4-1106-preview"),
    disabled_endpoints=[],
)

# Expose a Google Gemini chat model under /google
add_routes(
    app=app,
    path="/google",
    runnable=ChatGoogleGenerativeAI(model="gemini-pro"),
    disabled_endpoints=[],
)

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="localhost", port=7860, root_path=api_url)
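
For each registered route, add_routes also generates invoke, batch, stream, and playground endpoints. The sketch below shows one way a client could call the two chat routes using langserve's RemoteRunnable; the base URL (http://localhost:7860 with no proxy prefix) and the prompts are assumptions, so adjust them to your actual deployment.

# Minimal client sketch (assumes the server above is reachable at
# http://localhost:7860 with no proxy prefix; adjust the URL to your deployment)
from langserve import RemoteRunnable

openai_chat = RemoteRunnable("http://localhost:7860/openai/")
google_chat = RemoteRunnable("http://localhost:7860/google/")

# Chat model routes accept a plain string prompt and return a chat message
print(openai_chat.invoke("Say hello in one short sentence."))
print(google_chat.invoke("Say hello in one short sentence."))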