import gradio as gr
import requests
import os

API_TOKEN = os.getenv('API_TOKEN')
API_URL = "https://api-inference.huggingface.co/models/nasa-impact/nasa-smd-ibm-v0.1"
headers = {"Authorization": f"Bearer {API_TOKEN}"}


def query(payload):
    # Send the payload to the Hugging Face Inference API and return the parsed JSON
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()


def get_model_output(input_text):
    payload = {"inputs": input_text}
    # Example payload (assumes the RoBERTa-style <mask> token used by this model):
    # output = query({"inputs": "The answer to the universe is <mask>."})
    output = query(payload)

    # On failure the Inference API returns a dict (e.g. {"error": ...}) instead of a list
    if isinstance(output, dict):
        return f"Error: {output.get('error', output)}"

    # Format each fill-mask candidate as "sequence (Score: x.xxxx)", one per line
    results = []
    for item in output:
        sequence = item.get('sequence', '')
        score = item.get('score', 0)
        results.append(f"{sequence} (Score: {score:.4f})")
    return "\n".join(results)


# Define Gradio interface
article_text = """
### Notes:

### Possible Demo Apps:
1. **Data Cleaning and Imputation**
2. **Content Generation and Brainstorming**
3. **Code Completion and Documentation**
"""

demo = gr.Interface(
    fn=get_model_output,
    inputs=gr.Textbox(
        lines=2,
        label="Input with mask token",
        placeholder="Enter a sentence with <mask> to fill...",
    ),
    outputs="text",  # Plain-text output: one predicted sequence with its score per line
    title="nasa-smd-ibm-v0.1 Model Output",
    description="NASA SMD Indus model response (Fill Mask).",
    article=article_text,
)

# Launch the interface
demo.launch()