pokeberrypie committed
Commit b3b72cf · verified · 1 parent: 2e5718e

Update app.py

Files changed (1): app.py (+25, -18)
app.py CHANGED
@@ -1,41 +1,48 @@
+
+import os
 import gradio as gr
 from transformers import pipeline
-import os
 
-# Fetch the API key from environment variables, securely set in Space's settings
+# Load the API key from environment variables
 api_key = os.getenv("HF_API_KEY")
 
-if api_key is not None:
-    os.environ["HF_HOME"] = "/workspace"  # Hugging Face Spaces provides a /workspace directory
+if api_key is None:
+    print("HF_API_KEY not found. Please set it in the Space's secrets.")
+else:
+    # Set the necessary environment variables for Hugging Face Transformers
+    os.environ["HF_HOME"] = "/workspace"  # Use the /workspace directory in Hugging Face Spaces
     os.environ["TRANSFORMERS_CACHE"] = "/workspace/cache"  # Cache directory in Space
-    os.environ["HF_API_KEY"] = api_key
+    os.environ["HF_API_KEY"] = api_key  # Set the API key for Transformers library
 
-    # Attempt to load your model using the pipeline API
-    model_name = "tiiuae/falcon-180B"  # Ensure this is the correct model name
+    # Initialize the model using the API key and Transformers pipeline
     try:
+        model_name = "tiiuae/falcon-180B"  # Ensure this is the correct model name
         nlp_model = pipeline('text-generation', model=model_name)
+        print("Model loaded successfully.")
     except Exception as e:
-        nlp_model = None  # fallback in case of an error
-else:
-    nlp_model = None
-    print("HF_API_KEY not found. Please set it in the Space's secrets.")
+        nlp_model = None
+        print(f"Failed to load model: {str(e)}")
 
 def generate_text(prompt):
+    """Generate text based on the input prompt using the loaded NLP model."""
     if nlp_model:
-        response = nlp_model(prompt, max_length=50, num_return_sequences=1)
-        return response[0]['generated_text']
+        try:
+            response = nlp_model(prompt, max_length=50, num_return_sequences=1)
+            return response[0]['generated_text']
+        except Exception as e:
+            return f"Error during model inference: {str(e)}"
     else:
-        return "Model could not be loaded. Please check the model name and API key."
+        return "Model could not be loaded or initialized properly."
 
-# Gradio interface
+# Setup Gradio interface
 iface = gr.Interface(
     fn=generate_text,
-    inputs=gr.inputs.Textbox(lines=2, placeholder="Enter your text here..."),
+    inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),
     outputs="text",
     title="Text Generation with Falcon Model",
-    description="Enter some text and see how Falcon model continues it!"
+    description="Enter some text and see how the Falcon model continues it!"
 )
 
-# The following line is essential for Hugging Face Spaces
+# Essential for Hugging Face Spaces
 if __name__ == "__main__":
     iface.launch()
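
Note on trying the updated app.py outside this Space: the Hugging Face libraries read an access token from HF_TOKEN (or an explicit token= argument); a custom HF_API_KEY variable is not picked up automatically. Also, tiiuae/falcon-180B is far too large to load through pipeline() on standard Space hardware. The sketch below is a minimal local variant of the same pattern; the distilgpt2 checkpoint and the HF_TOKEN handling are stand-ins for illustration and are not part of this commit.

```python
# Minimal local sketch of the pattern in the updated app.py.
# Assumptions (not part of this commit): "distilgpt2" as a small stand-in checkpoint,
# and the token read from HF_TOKEN, the variable the Hub libraries actually check.
import os

import gradio as gr
from transformers import pipeline

hf_token = os.getenv("HF_TOKEN")  # Space secrets are exposed to the app as environment variables

try:
    model_name = "distilgpt2"  # stand-in; falcon-180B will not fit through pipeline() on a basic Space
    # Older transformers versions use use_auth_token= instead of token=.
    nlp_model = pipeline("text-generation", model=model_name, token=hf_token)
    print("Model loaded successfully.")
except Exception as e:
    nlp_model = None
    print(f"Failed to load model: {e}")


def generate_text(prompt):
    """Continue the prompt with the loaded text-generation pipeline."""
    if nlp_model is None:
        return "Model could not be loaded or initialized properly."
    try:
        response = nlp_model(prompt, max_length=50, num_return_sequences=1)
        return response[0]["generated_text"]
    except Exception as e:
        return f"Error during model inference: {e}"


iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your text here..."),  # gr.inputs.Textbox is the deprecated spelling
    outputs="text",
    title="Text Generation (local sketch)",
    description="Enter some text and see how the model continues it.",
)

if __name__ == "__main__":
    iface.launch()
```

Running tiiuae/falcon-180B itself would require dedicated large-GPU hardware or a hosted inference endpoint rather than an in-process pipeline() call on the Space.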