import gradio as gr
from transformers import pipeline
import os
# Fetch the API key from environment variables, securely set in the Space's settings
api_key = os.getenv("HF_API_KEY")

if api_key is not None:
    os.environ["HF_HOME"] = "/workspace"  # Hugging Face Spaces provides a /workspace directory
    os.environ["TRANSFORMERS_CACHE"] = "/workspace/cache"  # Cache directory in the Space

    # Attempt to load the model using the pipeline API
    model_name = "tiiuae/falcon-180B"  # Ensure this is the correct model name
    try:
        # Pass the token explicitly; transformers does not read the HF_API_KEY variable on its own
        nlp_model = pipeline("text-generation", model=model_name, token=api_key)
    except Exception as e:
        print(f"Failed to load model: {e}")
        nlp_model = None  # Fallback in case of an error
else:
    nlp_model = None
    print("HF_API_KEY not found. Please set it in the Space's secrets.")
def generate_text(prompt):
    if nlp_model:
        response = nlp_model(prompt, max_length=50, num_return_sequences=1)
        return response[0]['generated_text']
    else:
        return "Model could not be loaded. Please check the model name and API key."
# Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),  # gr.inputs.Textbox was removed in Gradio 4
    outputs="text",
    title="Text Generation with Falcon Model",
    description="Enter some text and see how the Falcon model continues it!",
)

# The following launch call is what Hugging Face Spaces runs
if __name__ == "__main__":
    iface.launch()