Commit 61d159f by akhaliq
1 Parent(s): eebe845

add huggingface inference providers
Files changed (3):
  1. app_huggingface.py +15 -62
  2. pyproject.toml +1 -1
  3. requirements.txt +3 -2
app_huggingface.py CHANGED
@@ -1,67 +1,20 @@
- import gradio as gr
- from gradio_client import Client, handle_file
-
- MODELS = {"SmolVLM-Instruct": "akhaliq/SmolVLM-Instruct"}
-
-
- def create_chat_fn(client):
-     def chat(message, history):
-         # Extract text and files from the message
-         text = message.get("text", "")
-         files = message.get("files", [])
-
-         # Handle file uploads if present
-         processed_files = [handle_file(f) for f in files]
-
-         response = client.predict(
-             message={"text": text, "files": processed_files},
-             system_prompt="You are a helpful AI assistant.",
-             temperature=0.7,
-             max_new_tokens=1024,
-             top_k=40,
-             repetition_penalty=1.1,
-             top_p=0.95,
-             api_name="/chat",
-         )
-         return response
-
-     return chat
-
-
- def set_client_for_session(model_name, request: gr.Request):
-     headers = {}
-     if request and hasattr(request, "headers"):
-         x_ip_token = request.headers.get("x-ip-token")
-         if x_ip_token:
-             headers["X-IP-Token"] = x_ip_token
-
-     return Client(MODELS[model_name], headers=headers)
-
-
- def safe_chat_fn(message, history, client):
-     if client is None:
-         return "Error: Client not initialized. Please refresh the page."
-     try:
-         return create_chat_fn(client)(message, history)
-     except Exception as e:
-         print(f"Error during chat: {e!s}")
-         return f"Error during chat: {e!s}"
-
-
- with gr.Blocks() as demo:
-     client = gr.State()
-
-     model_dropdown = gr.Dropdown(
-         choices=list(MODELS.keys()), value="SmolVLM-Instruct", label="Select Model", interactive=True
-     )
-
-     chat_interface = gr.ChatInterface(fn=safe_chat_fn, additional_inputs=[client], multimodal=True)
-
-     # Update client when model changes
-     model_dropdown.change(fn=set_client_for_session, inputs=[model_dropdown], outputs=[client])
-
-     # Initialize client on page load
-     demo.load(fn=set_client_for_session, inputs=[gr.State("SmolVLM-Instruct")], outputs=[client])
-
- if __name__ == "__main__":
-     demo.launch()
+ import ai_gradio
+
+ from utils_ai_gradio import get_app
+
+ # Get the huggingface models but keep their full names for loading
+ HUGGINGFACE_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("huggingface:")]
+
+ # Create display names without the prefix
+ HUGGINGFACE_MODELS_DISPLAY = [k.replace("huggingface:", "") for k in HUGGINGFACE_MODELS_FULL]
+
+
+ # Create and launch the interface using the get_app utility
+ demo = get_app(
+     models=HUGGINGFACE_MODELS_FULL,  # Use the full names with prefix
+     default_model=HUGGINGFACE_MODELS_FULL[0],
+     dropdown_label="Select Huggingface Model",
+     choices=HUGGINGFACE_MODELS_DISPLAY,  # Display names without prefix
+     fill_height=True,
+     coder=True,
+ )
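
The rewritten app delegates UI construction to the repo's get_app helper (imported from utils_ai_gradio), which is not part of this diff. As a rough sketch of what the huggingface:-prefixed registry keys resolve to, the pattern documented by ai_gradio loads one key straight into a Gradio app; get_app is assumed to wrap something like this and layer the model dropdown on top:

    import gradio as gr
    import ai_gradio  # importing registers provider-prefixed model keys in ai_gradio.registry

    # Same filter the app uses: keep only Hugging Face-backed models.
    hf_models = [k for k in ai_gradio.registry if k.startswith("huggingface:")]

    # gr.load() with src=ai_gradio.registry builds a ready-made chat UI for one model key.
    demo = gr.load(
        name=hf_models[0],
        src=ai_gradio.registry,
    )

    if __name__ == "__main__":
        demo.launch()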
pyproject.toml CHANGED
@@ -38,7 +38,7 @@ dependencies = [
      "langchain>=0.3.14",
      "chromadb>=0.5.23",
      "openai>=1.55.0",
-     "ai-gradio[crewai,deepseek,gemini,groq,hyperbolic,openai,smolagents,transformers, langchain, mistral,minimax,nvidia, qwen, openrouter]>=0.2.46",
+     "ai-gradio[crewai,deepseek,gemini,groq,hyperbolic,openai,smolagents,transformers, langchain, mistral,minimax,nvidia, qwen, openrouter, huggingface]>=0.2.47",
  ]

  [tool.uv.sources]
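
The version bump to ai-gradio 0.2.47 and the new huggingface extra are what populate the huggingface: keys the app filters on. A quick smoke test after re-syncing dependencies might look like the sketch below; the HF_TOKEN environment variable is an assumption on our part (the deployed Space supplies its own credentials):

    import os
    import ai_gradio

    # Without the huggingface extra installed, this list comes back empty.
    hf_keys = sorted(k for k in ai_gradio.registry if k.startswith("huggingface:"))
    print(f"{len(hf_keys)} huggingface models registered")

    # Hugging Face inference needs a token; we assume it is read from the environment.
    if not os.environ.get("HF_TOKEN"):
        print("warning: HF_TOKEN not set; chat requests will likely fail")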
requirements.txt CHANGED
@@ -2,7 +2,7 @@
  # uv pip compile pyproject.toml -o requirements.txt
  accelerate==1.2.1
      # via ai-gradio
- ai-gradio==0.2.46
+ ai-gradio==0.2.47
      # via anychat (pyproject.toml)
  aiofiles==23.2.1
      # via gradio
@@ -428,9 +428,10 @@ httpx-sse==0.4.0
      # langchain-community
  httpx-ws==0.7.1
      # via fireworks-ai
- huggingface-hub==0.27.1
+ huggingface-hub==0.28.1
      # via
      #   accelerate
+     #   ai-gradio
      #   gradio
      #   gradio-client
      #   tokenizers