genaibeauty committed
Commit a4c2d51 · verified · 1 Parent(s): cca27df

Update app.py

Files changed (1): app.py +7 -7
app.py CHANGED
@@ -12,8 +12,8 @@ WHISPER_API_URL = "https://api-inference.huggingface.co/models/openai/whisper-la
 # Set up headers for the Whisper API request
 headers = {"Authorization": f"Bearer {api_key}"}
 
-# Load the DeepSeek model using Hugging Face's pipeline (no API call, local model)
-pipe = pipeline("text-generation", model="deepseek-ai/DeepSeek-R1", trust_remote_code=True)
+# Load the DeepSeek model using Gradio's load function from the registry
+demo = gr.load(name="deepseek-ai/DeepSeek-R1", src="transformers_gradio.registry")
 
 # Function to query the Hugging Face Whisper model for audio transcription (API call)
 def transcribe_audio(audio_file):
@@ -25,14 +25,14 @@ def transcribe_audio(audio_file):
     else:
         return f"Error: {response.status_code}, {response.text}"
 
-# Function to generate Mermaid.js code using DeepSeek-R1 model (local processing)
+# Function to generate Mermaid.js code using DeepSeek-R1 model
 def generate_mermaid_code(prompt):
     # Instruction included in the prompt to guide DeepSeek to generate valid MermaidJS code
-    deepseek_prompt = f"Generate all possible valid MermaidJS diagram code for the following: {prompt}"
+    deepseek_prompt = f"Generate a valid MermaidJS diagram code for the following: {prompt}"
 
-    # Using the DeepSeek model pipeline for text generation
-    response = pipe([{"role": "user", "content": deepseek_prompt}])
-    return response[0]["generated_text"].strip()
+    # Use the loaded model `demo` to generate the MermaidJS code
+    response = demo(deepseek_prompt)
+    return response.strip()
 
 # Function to process text, audio, or both inputs
 def process_input(input_type, text_input, audio_input):
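
For reference, the transformers-gradio project documents passing its registry object (not a string) as the src argument of gr.load. Below is a minimal sketch of that documented pattern, assuming the transformers_gradio package is installed; whether the string form used in this commit resolves to the same loader is not something this diff shows.

import gradio as gr
import transformers_gradio  # assumption: installed via `pip install transformers-gradio`

# Documented pattern: hand the registry object itself to gr.load,
# which builds a chat-style demo around the named model.
demo = gr.load(
    name="deepseek-ai/DeepSeek-R1",
    src=transformers_gradio.registry,
)

if __name__ == "__main__":
    demo.launch()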
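
The body of transcribe_audio is unchanged by this commit, so the diff only shows its error branch. As a hedged sketch of how such a request is typically made against the hosted Whisper endpoint (using the WHISPER_API_URL and headers defined earlier in app.py); the "text" field access here is an assumption, not the author's code:

import requests

def transcribe_audio(audio_file):
    # Read the raw audio bytes and post them to the Whisper inference endpoint.
    with open(audio_file, "rb") as f:
        data = f.read()
    response = requests.post(WHISPER_API_URL, headers=headers, data=data)
    if response.status_code == 200:
        # assumption: the endpoint returns a JSON object with a "text" field
        return response.json().get("text", "")
    else:
        return f"Error: {response.status_code}, {response.text}"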