# --- Pasted Hugging Face Spaces web-UI header (not code); kept as comments ---
# Spaces:
# Runtime error
# Runtime error
# File size: 1,471 Bytes
# 7c2904b 74a0653 7c2904b 74a0653 7c2904b 74a0653 7c2904b 74a0653 7c2904b 74a0653 7c2904b 74a0653
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41
import gradio as gr
from transformers import pipeline
import os
# Fetch the API key from environment variables, securely set in the Space's settings.
api_key = os.getenv("HF_API_KEY")

if api_key is not None:
    # Hugging Face Spaces provides a writable /workspace directory; point the
    # HF caches there so downloaded model weights land on the large volume.
    os.environ["HF_HOME"] = "/workspace"
    os.environ["TRANSFORMERS_CACHE"] = "/workspace/cache"

    # Attempt to load the model using the pipeline API.
    model_name = "tiiuae/falcon-180B"  # Ensure this is the correct model name
    try:
        # Pass the key explicitly: transformers authenticates via the `token`
        # argument (or HF_TOKEN), not via an HF_API_KEY environment variable,
        # so re-exporting HF_API_KEY had no effect.
        nlp_model = pipeline('text-generation', model=model_name, token=api_key)
    except Exception as e:
        # Surface the failure instead of swallowing it silently, then fall back.
        print(f"Failed to load model '{model_name}': {e}")
        nlp_model = None
else:
    nlp_model = None
    print("HF_API_KEY not found. Please set it in the Space's secrets.")
def generate_text(prompt):
    """Return a text continuation of *prompt* from the loaded pipeline.

    Falls back to a human-readable error message when the model was not
    loaded (missing API key or a load-time failure).
    """
    if not nlp_model:
        return "Model could not be loaded. Please check the model name and API key."
    outputs = nlp_model(prompt, max_length=50, num_return_sequences=1)
    return outputs[0]['generated_text']
# Gradio interface wiring the generator to a simple text-in / text-out UI.
iface = gr.Interface(
    fn=generate_text,
    # The gr.inputs.* namespace is deprecated and removed in Gradio 4.x;
    # components are instantiated from the top-level gr module instead.
    inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),
    outputs="text",
    title="Text Generation with Falcon Model",
    description="Enter some text and see how Falcon model continues it!",
)

# Launch only when executed as a script — required entry point for HF Spaces.
if __name__ == "__main__":
    iface.launch()