from transformers import pipeline, VitsModel, AutoTokenizer
import torch
import os
from groq import Groq

# Transcription model (Whisper small for Portuguese)
transcriber = pipeline("automatic-speech-recognition", model="SamuelM0422/whisper-small-pt")

# Speech synthesis (TTS) model
model = VitsModel.from_pretrained("facebook/mms-tts-por")
tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-por")

# LLM query function
def query(text, groq_api_key):
    client = Groq(api_key=groq_api_key)

    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "Answer the following question concisely and objectively. If there are numbers in the response, WRITE THEM IN WORDS.",
            },
            {
                "role": "user",
                "content": text,
            },
        ],
        model="llama-3.1-8b-instant",
    )

    return chat_completion.choices[0].message.content

# Synthesise function
def synthesise(text):
    inputs = tokenizer(text, return_tensors="pt")
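    # Generate the waveform without tracking gradients (inference only)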
    with torch.no_grad():
        output = model(**inputs).waveform

    return output.cpu()

# Piecing it all together: transcribe the question, query the LLM, synthesise the answer
def ai_assistant(filepath, groq_key):
    transcription = transcriber(filepath)
    response = query(transcription["text"], groq_key)
    audio_response = synthesise(response)

    # MMS-TTS generates audio at a 16 kHz sampling rate
    return (16000, audio_response.squeeze().numpy()), response
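
# Example usage, as a minimal sketch: the audio file name and the environment
# variable holding the API key are illustrative assumptions, not part of the
# original script.
if __name__ == "__main__":
    groq_key = os.environ.get("GROQ_API_KEY")  # hypothetical env var for the Groq key
    (sampling_rate, audio), answer = ai_assistant("question.wav", groq_key)  # "question.wav" is a placeholder
    print("Answer:", answer)
    print(f"Audio: {audio.shape[0] / sampling_rate:.1f} s at {sampling_rate} Hz")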