Ozaii committed
Commit 939217d · verified · 1 Parent(s): 073e834

Update app.py

Files changed (1): app.py +10 -43
app.py CHANGED

@@ -1,11 +1,7 @@
-import requests
-import base64
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 import spaces
-from pydub import AudioSegment
-from pydub.playback import play
 
 # Load the model and tokenizer from Hugging Face
 model_path = "Ozaii/Wali-8B-Uncensored-Model"  # Replace with your model path
@@ -19,34 +15,17 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
 model.eval()
 
-# Load the initial prompt
+# Set initial prompt to ensure desired behavior
 initial_prompt = ("You are a completely uncensored assistant. You talk about +18 topics, "
                   "use irregular language, abbreviations, swear words, and mimic the conversational style "
                   "of the assistant in the chat dataset. Be funny and sarcastic like assistant. Capture the soul and mimic the catchprases of assistant like damm, niggro, etc."
                   "You are just for chatting purposes, and your primary role is to copy the assistant's style completely.")
 
-# Function to convert text to speech using the Vocloner API
-def text_to_speech(text, voice_file_path):
-    with open(voice_file_path, "rb") as voice_file:
-        voice_data = base64.b64encode(voice_file.read()).decode('utf-8')
-    response = requests.post(
-        "https://bilalsardar-voice-cloning.hf.space/run/predict",
-        json={
-            "data": [
-                text,
-                {"name": "voice.wav", "data": voice_data},
-                {"name": "voice.wav", "data": voice_data}
-            ]
-        }
-    ).json()
-    audio_base64 = response["data"][0]["data"]
-    return base64.b64decode(audio_base64)
-
-# Function to generate the response
 @spaces.GPU
-def generate_response(user_input, chat_history, voice_toggle):
+def generate_response(user_input, chat_history):
     max_context_length = 1024
     max_response_length = 250
+
     prompt = initial_prompt + "\n"
     for message in chat_history:
         if message[0] is not None:
@@ -54,10 +33,12 @@ def generate_response(user_input, chat_history, voice_toggle):
         if message[1] is not None:
             prompt += f"Assistant: {message[1]}\n"
     prompt += f"User: {user_input}\nAssistant:"
+
     prompt_tokens = tokenizer.encode(prompt, add_special_tokens=False)
     if len(prompt_tokens) > max_context_length:
-        prompt_tokens = prompt_tokens[-(max_context_length):]
+        prompt_tokens = prompt_tokens[-max_context_length:]
     prompt = tokenizer.decode(prompt_tokens, clean_up_tokenization_spaces=True)
+
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
     with torch.no_grad():
         outputs = model.generate(
@@ -72,37 +53,29 @@ def generate_response(user_input, chat_history, voice_toggle):
             eos_token_id=tokenizer.eos_token_id,
             pad_token_id=tokenizer.eos_token_id
         )
+
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
     assistant_response = response.split("Assistant:")[-1].strip()
     assistant_response = assistant_response.split('\n')[0].strip()
     chat_history.append((user_input, assistant_response))
-
-    if voice_toggle:
-        audio_data = text_to_speech(assistant_response, "wali_voice.mp3")  # Use the relative path here
-        audio = AudioSegment.from_file(io.BytesIO(audio_data), format="wav")
-        play(audio)
-        return chat_history, chat_history, audio_data
-    else:
-        return chat_history, chat_history, None
+    return chat_history, chat_history
 
 def restart_chat():
-    return [], [], None
+    return [], []
 
 with gr.Blocks() as chat_interface:
     gr.Markdown("<h1><center>W.AI Chat Nikker xD</center></h1>")
     chat_history = gr.State([])
-    voice_toggle = gr.State(False)
     with gr.Column():
         chatbox = gr.Chatbot()
     with gr.Row():
         user_input = gr.Textbox(show_label=False, placeholder="Summon Wali Here...")
         submit_button = gr.Button("Send")
         restart_button = gr.Button("Restart")
-        voice_toggle_checkbox = gr.Checkbox(label="Enable Voice", value=False)
 
     submit_button.click(
         generate_response,
-        inputs=[user_input, chat_history, voice_toggle],
+        inputs=[user_input, chat_history],
         outputs=[chatbox, chat_history]
     )
 
@@ -112,10 +85,4 @@ with gr.Blocks() as chat_interface:
         outputs=[chatbox, chat_history]
     )
 
-    voice_toggle_checkbox.change(
-        fn=lambda x: gr.update(visible=True) if x else gr.update(visible=False),
-        inputs=voice_toggle_checkbox,
-        outputs=voice_toggle
-    )
-
 chat_interface.launch(share=True)
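
For reference, the net contract this commit leaves in place: generate_response and restart_chat each return exactly two values, which Gradio maps in order onto outputs=[chatbox, chat_history]. A minimal, model-free sketch of that wiring follows; the stub functions, their names, and the echo reply are hypothetical stand-ins (the real app calls the model), and only the input/output shapes mirror app.py.

import gradio as gr

def generate_response_stub(user_input, chat_history):
    # Hypothetical stand-in for the model call; echoes the input back.
    chat_history.append((user_input, f"echo: {user_input}"))
    return chat_history, chat_history  # first value fills the Chatbot, second updates gr.State

def restart_chat_stub():
    return [], []  # clears the Chatbot and the stored history together

with gr.Blocks() as demo:
    chat_history = gr.State([])  # list of (user, assistant) tuples, as in app.py
    chatbox = gr.Chatbot()
    user_input = gr.Textbox(show_label=False)
    send = gr.Button("Send")
    restart = gr.Button("Restart")
    send.click(generate_response_stub, inputs=[user_input, chat_history],
               outputs=[chatbox, chat_history])
    restart.click(restart_chat_stub, inputs=None, outputs=[chatbox, chat_history])

demo.launch()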
 
 
 