ollama / app.py
Echo-ai
Create app.py
e00b411 verified
raw
history blame
366 Bytes
from fastapi import FastAPI
from langchain_community.llms import Ollama
# Module-level FastAPI application instance; route handlers below register on it.
app = FastAPI()
# Initialize the Ollama model wrapper with the "tinyllama" model.
# NOTE(review): this only constructs the client — it assumes an Ollama server
# is reachable at the library's default endpoint when a request arrives; confirm
# the Space actually runs one, or requests to /chat will fail at invoke time.
llm = Ollama(model="tinyllama")
@app.get("/")
async def root():
    """Liveness endpoint: report that the service is up and reachable."""
    status_payload = {"message": "Ollama is running on Hugging Face Spaces!"}
    return status_payload
@app.get("/chat")
async def chat(query: str):
    """Run *query* through the Ollama model and return its completion.

    The model's raw text output is wrapped in a JSON object under the
    "response" key.
    """
    answer = llm.invoke(query)
    return {"response": answer}