prithivMLmods committed on
Commit
179949b
·
verified ·
1 Parent(s): 0704ce3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -13
app.py CHANGED
@@ -17,7 +17,7 @@ def search(query):
17
  with requests.Session() as session:
18
  resp = session.get(
19
  url="https://www.google.com/search",
20
- headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"},
21
  params={"q": term, "num": 3, "udm": 14},
22
  timeout=5,
23
  verify=None,
@@ -29,7 +29,7 @@ def search(query):
29
  link = result.find("a", href=True)
30
  link = link["href"]
31
  try:
32
- webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"}, timeout=5, verify=False)
33
  webpage.raise_for_status()
34
  visible_text = extract_text_from_webpage(webpage.text)
35
  if len(visible_text) > max_chars_per_page:
@@ -39,7 +39,9 @@ def search(query):
39
  all_results.append({"link": link, "text": None})
40
  return all_results
41
 
 
42
  client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
 
43
  client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
44
 
45
  func_caller = []
@@ -57,8 +59,7 @@ def respond(message, history):
57
  func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
58
  func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})
59
 
60
- message_text = message["text"]
61
- func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message_text}'})
62
 
63
  response = client_gemma.chat_completion(func_caller, max_tokens=200)
64
  response = str(response)
@@ -80,11 +81,11 @@ def respond(message, history):
80
  web_results = search(query)
81
  gr.Info("Extracting relevant Info")
82
  web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results if res['text']])
83
- messages = f"Web Dac uses the user agents of Mozilla, AppleWebKit, and Safari browsers for chat responses and human context mimicking."
84
  for msg in history:
85
  messages += f"\nuser\n{str(msg[0])}"
86
  messages += f"\nassistant\n{str(msg[1])}"
87
- messages+=f"\nuser\n{message_text}\nweb_result\n{web2}\nassistant\n"
88
  stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
89
  output = ""
90
  for response in stream:
@@ -92,11 +93,11 @@ def respond(message, history):
92
  output += response.token.text
93
  yield output
94
  else:
95
- messages = f"Web Dac uses the user agents of Mozilla, AppleWebKit, and Safari browsers for chat responses and human context mimicking."
96
  for msg in history:
97
  messages += f"\nuser\n{str(msg[0])}"
98
  messages += f"\nassistant\n{str(msg[1])}"
99
- messages+=f"\nuser\n{message_text}\nassistant\n"
100
  stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
101
  output = ""
102
  for response in stream:
@@ -104,11 +105,11 @@ def respond(message, history):
104
  output += response.token.text
105
  yield output
106
  except:
107
- messages = f"Web Dac uses the user agents of Mozilla, AppleWebKit, and Safari browsers for chat responses and human context mimicking."
108
  for msg in history:
109
  messages += f"\nuser\n{str(msg[0])}"
110
  messages += f"\nassistant\n{str(msg[1])}"
111
- messages+=f"\nuser\n{message_text}\nassistant\n"
112
  stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
113
  output = ""
114
  for response in stream:
@@ -120,8 +121,8 @@ demo = gr.ChatInterface(
120
  fn=respond,
121
  chatbot=gr.Chatbot(show_copy_button=True, likeable=True, layout="panel"),
122
  description=" ",
123
- textbox=gr.Textbox(),
124
- multimodal=False,
125
  concurrency_limit=200,
126
  )
127
- demo.launch()
 
17
  with requests.Session() as session:
18
  resp = session.get(
19
  url="https://www.google.com/search",
20
+ headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"},
21
  params={"q": term, "num": 3, "udm": 14},
22
  timeout=5,
23
  verify=None,
 
29
  link = result.find("a", href=True)
30
  link = link["href"]
31
  try:
32
+ webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"}, timeout=5, verify=False)
33
  webpage.raise_for_status()
34
  visible_text = extract_text_from_webpage(webpage.text)
35
  if len(visible_text) > max_chars_per_page:
 
39
  all_results.append({"link": link, "text": None})
40
  return all_results
41
 
42
+ # Initialize inference clients for different models
43
  client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
44
+ client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
45
  client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
46
 
47
  func_caller = []
 
59
  func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
60
  func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})
61
 
62
+ func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message}'})
 
63
 
64
  response = client_gemma.chat_completion(func_caller, max_tokens=200)
65
  response = str(response)
 
81
  web_results = search(query)
82
  gr.Info("Extracting relevant Info")
83
  web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results if res['text']])
84
+ messages = f"system\nYou are OpenCHAT mini a helpful assistant made by KingNish. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesarry things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions."
85
  for msg in history:
86
  messages += f"\nuser\n{str(msg[0])}"
87
  messages += f"\nassistant\n{str(msg[1])}"
88
+ messages+=f"\nuser\n{message}\nweb_result\n{web2}\nassistant\n"
89
  stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
90
  output = ""
91
  for response in stream:
 
93
  output += response.token.text
94
  yield output
95
  else:
96
+ messages = f"system\nYou are OpenCHAT mini a helpful assistant made by KingNish. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions."
97
  for msg in history:
98
  messages += f"\nuser\n{str(msg[0])}"
99
  messages += f"\nassistant\n{str(msg[1])}"
100
+ messages+=f"\nuser\n{message}\nassistant\n"
101
  stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
102
  output = ""
103
  for response in stream:
 
105
  output += response.token.text
106
  yield output
107
  except:
108
+ messages = f"system\nYou are OpenCHAT mini a helpful assistant made by KingNish. You answers users query like human friend. You are also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions."
109
  for msg in history:
110
  messages += f"\nuser\n{str(msg[0])}"
111
  messages += f"\nassistant\n{str(msg[1])}"
112
+ messages+=f"\nuser\n{message}\nassistant\n"
113
  stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
114
  output = ""
115
  for response in stream:
 
121
  fn=respond,
122
  chatbot=gr.Chatbot(show_copy_button=True, likeable=True, layout="panel"),
123
  description=" ",
124
+ textbox=gr.Textbox(), # Changed to Textbox
125
+ multimodal=False, # Disabled multimodal
126
  concurrency_limit=200,
127
  )
128
+ demo.launch(share=True)