Prasada committed on
Commit 17fca91 · 1 Parent(s): 1f09c93

Create app.py

Files changed (1)
  1. app.py +16 -15
app.py CHANGED
@@ -48,21 +48,21 @@ def prepare_data(temp_text, audio_prompt):
     example_embeddings = torch.tensor(example["speaker_embeddings"]).unsqueeze(0)
     return example_embeddings
 
-def generate_gpt4_response(user_text, print_output=False):
-    """
-    Query OpenAI GPT-4 for the specific key and get back a response
-    :type user_text: str the user's text to query for
-    :type print_output: boolean whether or not to print the raw output JSON
-    """
-    message=[{"role": "user", "content": user_text+'in just 2 very small sentences'}]
-    completions = ai.ChatCompletion.create(
-        model="gpt-4",
-        messages=message,
-        max_tokens=250
-    )
+# def generate_gpt4_response(user_text, print_output=False):
+#     """
+#     Query OpenAI GPT-4 for the specific key and get back a response
+#     :type user_text: str the user's text to query for
+#     :type print_output: boolean whether or not to print the raw output JSON
+#     """
+#     message=[{"role": "user", "content": user_text+'in just 2 very small sentences'}]
+#     completions = ai.ChatCompletion.create(
+#         model="gpt-4",
+#         messages=message,
+#         max_tokens=250
+#     )
 
-    # Return the first choice's text
-    return completions['choices'][0]['message']['content']
+#     # Return the first choice's text
+#     return completions['choices'][0]['message']['content']
 
 
 def predict(temp_text, temp_audio, record_audio_prompt, prompt_text):
@@ -71,7 +71,8 @@ def predict(temp_text, temp_audio, record_audio_prompt, prompt_text):
     else:
         audio_prompt = record_audio_prompt
 
-    text = generate_gpt4_response(prompt_text)
+    # text = generate_gpt4_response(prompt_text)
+    text=prompt_text
     embeddings=prepare_data(temp_text, audio_prompt)
     inputs = processor(text=text, return_tensors="pt")
     spectrogram = model.generate_speech(inputs["input_ids"], embeddings)
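The helper commented out above calls ai.ChatCompletion.create, which is the legacy pre-1.0 interface of the openai Python SDK. Purely as a point of reference, a minimal sketch of the same request against the current SDK (openai >= 1.0) could look like the following; the OpenAI() client setup and the function name are assumptions for illustration, not code from this repository.

# Sketch only (not from app.py): the commented-out GPT-4 call rewritten for openai>=1.0.
# Assumes OPENAI_API_KEY is set in the environment.
from openai import OpenAI

client = OpenAI()

def generate_gpt4_response(user_text):
    """Ask GPT-4 for a reply limited to two short sentences and return its text."""
    completion = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": user_text + " in just 2 very small sentences"}],
        max_tokens=250,
    )
    # Return the first choice's text, as the original helper did.
    return completion.choices[0].message.content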
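After this commit, predict feeds prompt_text directly to the speech model: processor(text=..., return_tensors="pt") followed by model.generate_speech(input_ids, embeddings) matches the SpeechT5 text-to-speech API in transformers. The sketch below shows that path end to end under that assumption; the checkpoint names, the all-zeros speaker embedding, and the vocoder step are illustrative placeholders, not necessarily what app.py actually loads.

# Sketch of the generation path used by predict() after this change, assuming SpeechT5.
# Checkpoints and the zero-valued speaker embedding are illustrative placeholders.
import torch
import soundfile as sf
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

text = "Hello from the demo."      # after this commit: text = prompt_text, no GPT-4 rewrite
embeddings = torch.zeros(1, 512)   # stands in for prepare_data()'s speaker embedding

inputs = processor(text=text, return_tensors="pt")
spectrogram = model.generate_speech(inputs["input_ids"], embeddings)  # as in predict()
with torch.no_grad():
    speech = vocoder(spectrogram)  # mel spectrogram -> 16 kHz waveform
sf.write("output.wav", speech.numpy(), samplerate=16000)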