"""Gradio app that drafts student learning goals for a topic using the BLOOM inference API."""

import os

import gradio as gr
import requests

API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]  # fail fast at startup if the token is missing
headers = {"Authorization": f"Bearer {HF_TOKEN}"}


def get_results(prompt):
    """Send *prompt* to the BLOOM inference endpoint and return the generated text.

    Returns the fallback string "Not able to work" on any network error,
    non-2xx HTTP status, or malformed/unexpected response payload.
    """
    payload = {
        "inputs": prompt,
        "parameters": {
            "top_p": 0.9,
            "typical_p": 0.2,
            "temperature": 0.8,
            "max_new_tokens": 250,
            "return_full_text": True,
        },
        "options": {
            "use_cache": True,
            "wait_for_model": True,  # block until the model is loaded rather than 503
        },
    }
    try:
        # timeout prevents the UI from hanging forever on a stuck request;
        # wait_for_model can legitimately take a while, hence the generous value.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
        response.raise_for_status()
        output = response.json()
        return output[0]["generated_text"]
    except (requests.RequestException, KeyError, IndexError, TypeError, ValueError):
        # Narrow exceptions instead of a bare `except:` — still best-effort,
        # but no longer swallows KeyboardInterrupt/SystemExit or hides bugs.
        return "Not able to work"


def learn_goals(topic, text):
    """Build a few-shot prompt from *topic* and *text* and query the model.

    The prompt ends mid-sentence ("The student will be able to") so the model
    continues the list of learning goals.
    """
    prompt = (
        f"Topic: {topic}\n"
        f"{text}\n"
        "Some examples of learning goals from above are:\n"
        "1. The student will be able to"
    )
    return get_results(prompt)


topic = gr.Textbox(lines=1, placeholder="Topic")
text = gr.Textbox(lines=5, placeholder="Text")

iface = gr.Interface(fn=learn_goals, inputs=[topic, text], outputs="text")

if __name__ == "__main__":
    # Guard the launch so importing this module does not start a server.
    iface.launch()