genaibeauty committed
Commit d1b3c3a · verified · 1 Parent(s): 81858f2

Update app.py

Files changed (1)
  1. app.py +9 -14
app.py CHANGED
@@ -1,37 +1,32 @@
 import gradio as gr
 import requests
 import os
-from huggingface_hub import InferenceClient
 
 # Set up the Hugging Face API key (ensure you've set this as an environment variable)
 api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")
 
 # API URLs
 WHISPER_API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3-turbo"
+DEEPSEEK_API_URL = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1"
 
-# Set up inference client for DeepSeek-R1
-client = InferenceClient(
-    provider="together",
-    api_key=api_key
-)
+headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
 
 def transcribe_audio(audio_file):
     with open(audio_file, "rb") as f:
         data = f.read()
-    response = requests.post(WHISPER_API_URL, headers={"Authorization": f"Bearer {api_key}"}, data=data)
+    response = requests.post(WHISPER_API_URL, headers=headers, data=data)
     if response.status_code == 200:
         return response.json().get("text", "Transcription not available.")
     else:
         return f"Error: {response.status_code}, {response.text}"
 
 def generate_mermaid_code(prompt):
-    messages = [{"role": "user", "content": f"Generate a valid MermaidJS diagram code for the following: {prompt}"}]
-    completion = client.chat.completions.create(
-        model="deepseek-ai/DeepSeek-R1",
-        messages=messages,
-        max_tokens=500
-    )
-    return completion.choices[0].message['content'].strip()
+    payload = {"inputs": prompt, "parameters": {"max_tokens": 500}}
+    response = requests.post(DEEPSEEK_API_URL, headers=headers, json=payload)
+    if response.status_code == 200:
+        return response.json()["generated_text"].strip()
+    else:
+        return f"Error: {response.status_code}, {response.text}"
 
 def process_input(input_type, text_input, audio_input):
     if input_type == "Audio" and audio_input is not None:
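
Note on the new generate_mermaid_code path: the Hugging Face serverless Inference API's text-generation task usually expects max_new_tokens (rather than max_tokens) under "parameters" and returns a JSON list shaped like [{"generated_text": "..."}], so indexing response.json()["generated_text"] directly can fail. A minimal, more defensive sketch, assuming that list-of-dicts response schema (the parameter and key names here are assumptions, not confirmed by this commit):

import os
import requests

DEEPSEEK_API_URL = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1"
headers = {
    "Authorization": f"Bearer {os.getenv('HUGGINGFACEHUB_API_TOKEN')}",
    "Content-Type": "application/json",
}

def generate_mermaid_code(prompt):
    # Assumption: the text-generation endpoint takes "max_new_tokens" and returns
    # a list like [{"generated_text": "..."}]; handle other shapes gracefully.
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 500}}
    response = requests.post(DEEPSEEK_API_URL, headers=headers, json=payload)
    if response.status_code != 200:
        return f"Error: {response.status_code}, {response.text}"
    data = response.json()
    if isinstance(data, list) and data and "generated_text" in data[0]:
        return data[0]["generated_text"].strip()
    if isinstance(data, dict) and "generated_text" in data:
        return data["generated_text"].strip()
    return f"Unexpected response format: {data}"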