Dorjzodovsuren committed
Commit bbd8adf · verified · 1 Parent(s): 174fb8e

Update app.py

Files changed (1):
  1. app.py +3 -56
app.py CHANGED
@@ -7,9 +7,8 @@ import time
 import tempfile
 from huggingface_hub import InferenceClient
 
-DESCRIPTION = """ # <center><b>JARVIS⚡</b></center>
-### <center>A personal Assistant of Tony Stark for YOU
-### <center>Currently It supports text input, But If this space completes 1k hearts than I starts working on Audio Input.</center>
+DESCRIPTION = """ # <center><b>DorjGPT⚡</b></center>
+### <center>A personal Assistant
 """
 
 MORE = """ ## TRY Other Models
@@ -50,64 +49,12 @@ async def generate1(prompt):
         await communicate.save(tmp_path)
     yield tmp_path
 
-client2 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
-
-system_instructions2 = "<s>[SYSTEM] You are AI assistant named DorjGPT, Keep conversation very short, clear, friendly and concise. The text provided is a request for a specific type of response from you, the virtual assistant. The request asks you to provide friendly responses as if super interlligent AI assistant. The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, your task is to generate responses that appear as if they are created by the user and not an AI-powered assistant. [USER]"
-
-async def generate2(prompt):
-    generate_kwargs = dict(
-        temperature=0.6,
-        max_new_tokens=512,
-        top_p=0.95,
-        repetition_penalty=1,
-        do_sample=True,
-    )
-    formatted_prompt = system_instructions2 + prompt + "[ASSISTANT]"
-    stream = client2.text_generation(
-        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
-    output = ""
-    for response in stream:
-        output += response.token.text
-    output = output.replace("</s>","")
-
-    communicate = edge_tts.Communicate(output)
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
-        tmp_path = tmp_file.name
-        await communicate.save(tmp_path)
-    yield tmp_path
-
-client3 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
-
-system_instructions3 = "[SYSTEM]The text provided is a request for a specific type of response from me, the virtual assistant. The request asks me to provide detailed and friendly responses as if I am the character Jarvis, inspired by 'Tony Stark.' The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, my task is to generate responses that appear as if they are created by the user and not an AI-powered assistant.[USER]"
-
-async def generate3(prompt):
-    generate_kwargs = dict(
-        temperature=0.6,
-        max_new_tokens=2048,
-        top_p=0.95,
-        repetition_penalty=1,
-        do_sample=True,
-    )
-    formatted_prompt = system_instructions3 + prompt + "[ASSISTANT]"
-    stream = client3.text_generation(
-        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
-    output = ""
-    for response in stream:
-        output += response.token.text
-    output = output.replace("</s>","")
-
-    communicate = edge_tts.Communicate(output)
-    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
-        tmp_path = tmp_file.name
-        await communicate.save(tmp_path)
-    yield tmp_path
-
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
     with gr.Row():
         user_input = gr.Textbox(label="Prompt", value="What is Wikipedia")
         input_text = gr.Textbox(label="Input Text", elem_id="important")
-        output_audio = gr.Audio(label="JARVIS", type="filepath",
+        output_audio = gr.Audio(label="DorjGPT", type="filepath",
                                 interactive=False,
                                 autoplay=True,
                                 elem_classes="audio")
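
The two deleted helpers duplicated one text-to-speech pipeline: stream a completion from the InferenceClient, strip the end-of-sequence token, then synthesize the reply to a temporary WAV file with edge-tts for the autoplaying gr.Audio output. The surviving generate1 presumably follows the same pattern; below is a minimal sketch reconstructed from the deleted generate2/generate3 bodies. The function name, system prompt, and sampling settings are assumptions, since generate1's body is not shown in this diff.

# Hypothetical sketch -- mirrors the deleted generate2/generate3 bodies;
# names, system prompt, and sampling settings are assumptions.
import tempfile

import edge_tts
from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
system_instructions = "[SYSTEM] Keep answers short, clear and friendly. [USER]"  # assumed prompt text

async def generate(prompt):
    # Stream tokens from the hosted model and accumulate them into one string.
    stream = client.text_generation(
        system_instructions + prompt + "[ASSISTANT]",
        temperature=0.6,
        max_new_tokens=512,
        top_p=0.95,
        repetition_penalty=1,
        do_sample=True,
        stream=True,
        details=True,
    )
    output = ""
    for response in stream:
        output += response.token.text
    output = output.replace("</s>", "")

    # Synthesize the accumulated reply to a temporary WAV file with edge-tts,
    # then yield the path so the Gradio Audio component can play it.
    communicate = edge_tts.Communicate(output)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    yield tmp_path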