#https://stackoverflow.com/questions/75286784/how-do-i-gracefully-close-terminate-gradio-from-within-gradio-blocks
#https://levelup.gitconnected.com/bringing-your-ai-models-to-life-an-introduction-to-gradio-ae051ca83edf
#Bringing Your AI Models to Life: An Introduction to Gradio - How to demo your ML model quickly without any front-end hassle.
#pipreqs . --encoding=utf-8
#https://huggingface.co/blog/inference-endpoints
#Getting Started with Hugging Face Inference Endpoints
#https://www.gradio.app/guides/using-hugging-face-integrations Using Hugging Face Integrations

#In a cmd window opened in the root directory of app.py, run: gradio deploy. The following prompts will appear:
#To login, `huggingface_hub` requires a token generated from https://huggingface.co/settings/tokens .
#Token can be pasted using 'Right-Click'.
#Token: copy HF_API_TOKEN to the clipboard first, then a single right-click pastes it
#Add token as git credential? (Y/n) n
#Token is valid (permission: write).
#Your token has been saved to C:\Users\lenovo\.cache\huggingface\token
#Login successful
#Creating new Spaces Repo in 'D:\ChatGPTApps\Gradio_HF_Apps'. Collecting metadata, press Enter to accept default value.
#Enter Spaces app title [Gradio_HF_Apps]:

import os
import io
import requests
import json
from IPython.display import display, HTML   # Image dropped from this import so it does not shadow PIL.Image below
from PIL import Image
import base64
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())   # read local .env file

#(better practice: read the token from the .env file loaded above, e.g. os.environ["HF_API_TOKEN"], instead of hard-coding it)
hf_api_key = "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"

#model_id = "sentence-transformers/all-MiniLM-L6-v2"
model_id = "shleifer/distilbart-cnn-12-6"
#hf_token = "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"

#ENDPOINT_URL='https://api-inference.huggingface.co/models/DunnBC22/flan-t5-base-text_summarization_data'
api_url = 'https://api-inference.huggingface.co/models/DunnBC22/flan-t5-base-text_summarization_data'
#api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}"
#The pipeline/feature-extraction Inference API endpoint corresponds to the transformers feature-extraction pipeline (used to generate vector embeddings)!

#headers = {"Authorization": f"Bearer {hf_token}"}
headers = {"Authorization": f"Bearer {hf_api_key}"}

#def query(texts):
#    response = requests.post(api_url, headers=headers, json={"inputs": texts, "options": {"wait_for_model": True}})
#    return response.json()

#ENDPOINT_URL is not required; api_url can be used instead (or are the two the same thing?): https://huggingface.co/blog/getting-started-with-embeddings
def get_completion(inputs, parameters=None, ENDPOINT_URL=api_url):
    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json"
    }
    data = {"inputs": inputs}
    if parameters is not None:
        data.update({"parameters": parameters})
    response = requests.request("POST",
                                ENDPOINT_URL,
                                headers=headers,
                                data=json.dumps(data))
    return json.loads(response.content.decode("utf-8"))
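
#A minimal sketch (not in the original script) of how the optional `parameters` argument to
#get_completion could be used. min_length / max_length are assumed here to be the summarization
#parameters accepted by the hosted Inference API for this model; uncomment to try it.
#short = get_completion(
#    "Gradio lets you wrap any Python function in a shareable web UI with a few lines of code.",
#    parameters={"min_length": 10, "max_length": 40},
#)
#print(short)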
text = ('''Alexander III of Macedon (Ancient Greek: Ἀλέξανδρος, romanized: Alexandros; 20/21 July 356 BC – 10/11 June 323 BC), '''
        '''commonly known as Alexander the Great,[a] was a king of the ancient Greek kingdom of Macedon.[a] '''
        '''He succeeded his father Philip II to the throne in 336 BC at the age of 20, '''
        '''and spent most of his ruling years conducting a lengthy military campaign throughout Western Asia and Egypt. '''
        '''By the age of 30, he had created one of the largest empires in history, stretching from Greece to northwestern India.[2] '''
        '''He was undefeated in battle and is widely considered to be one of history's greatest and most successful military commanders.[3][4]''')

get_completion(text)
#When api_url is built with model_id = "sentence-transformers/all-MiniLM-L6-v2", the returned result is a list of floats (i.e. the vector embeddings)

import gradio as gr

def summarize(input):
    output = get_completion(input)
    return output[0]['summary_text']

gr.close_all()
#Note: api_url above actually points at DunnBC22/flan-t5-base-text_summarization_data, not the shleifer/distilbart-cnn-12-6 model named in the title/description.
demo = gr.Interface(fn=summarize,
                    inputs=[gr.Textbox(label="Text to summarize", lines=6)],
                    outputs=[gr.Textbox(label="Result", lines=3)],
                    title="Text summarization with distilbart-cnn",
                    description="Summarize any text using the `shleifer/distilbart-cnn-12-6` model under the hood!"
                   )
#demo.launch(share=True, server_port=int(os.environ['PORT2']))
#demo.launch(share=True)
demo.launch()
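
#A minimal sketch (an assumption, not part of the original app) of how the server launched above can be
#shut down programmatically, per the Stack Overflow link at the top of this file. demo.close() and
#gr.close_all() are existing Gradio calls; launching with prevent_thread_lock=True keeps the script
#running past launch() so close() can actually be reached.
#demo.launch(prevent_thread_lock=True)
#...            # interact with the running app, run tests, etc.
#demo.close()   # shut down this Interface's server and free its port
#gr.close_all() # or close every Gradio server started in this process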