import json

import fastapi
import markdown
import uvicorn
from ctransformers import AutoModelForCausalLM
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from pydantic import BaseModel
from sse_starlette.sse import EventSourceResponse

# Generation settings for the model. Note: depending on your installed
# ctransformers version, from_pretrained may expect these as keyword
# arguments or an AutoConfig rather than a plain dict, and some keys map to
# different names there (e.g. max_seq_len corresponds to context_length).
config = {
    "temperature": 0.8,
    "top_p": 0.95,
    "top_k": 50,
    "max_new_tokens": 1024,
    "use_cache": True,
    "do_sample": True,
    "repetition_penalty": 1.02,
    "max_seq_len": 4096,
}

# Load the quantized GGML build of MPT-7B-StoryWriter from the Hugging Face Hub.
llm = AutoModelForCausalLM.from_pretrained(
    'TheBloke/MPT-7B-Storywriter-GGML',
    model_file='mpt-7b-storywriter.ggmlv3.q4_0.bin',
    model_type='mpt',
    config=config,
)

app = fastapi.FastAPI()

# Allow cross-origin requests from any host so a browser-based demo page
# can call the API directly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.get("/")
async def index():
    # Serve the project README, rendered from Markdown to HTML.
    with open("README.md", "r", encoding="utf-8") as readme_file:
        md_template_string = readme_file.read()
    html_content = markdown.markdown(md_template_string)
    return HTMLResponse(content=html_content, status_code=200)


# Request body for the chat completion endpoint: a single prompt string.
class ChatCompletionRequest(BaseModel):
    prompt: str


@app.get("/demo")
async def demo():
    html_content = """