K00B404 committed on
Commit
e8b1e31
·
verified ·
1 Parent(s): 67d278f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -2
app.py CHANGED
@@ -3,12 +3,22 @@ import requests
3
  from PIL import Image
4
  from io import BytesIO
5
  import os
 
 
 
 
 
6
 
 
 
 
 
 
 
 
7
  # Load API Token from environment variable
8
- API_TOKEN = os.getenv("HF_API_TOKEN") # Ensure you've set this environment variable
9
 
10
  # Hugging Face Inference API URL
11
- API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
12
 
13
  # Function to call Hugging Face API and get the generated image
14
  def generate_image(prompt):
 
3
  from PIL import Image
4
  from io import BytesIO
5
  import os
6
+ from huggingface_hub import InferenceClient
7
+ API_TOKEN = os.getenv("HF_API_TOKEN") # Ensure you've set this environment variable
8
+ API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
9
+
10
+ enhancer = InferenceClient(api_key =API_TOKEN)
11
 
12
+ for message in client.chat_completion(
13
+ model="meta-llama/Llama-3.2-1B-Instruct",
14
+ messages=[{"role": "user", "content": "What is the capital of France?"}],
15
+ max_tokens=500,
16
+ stream=True,
17
+ ):
18
+ print(message.choices[0].delta.content, end="")
19
  # Load API Token from environment variable
 
20
 
21
  # Hugging Face Inference API URL
 
22
 
23
  # Function to call Hugging Face API and get the generated image
24
  def generate_image(prompt):