Update app.py
app.py
CHANGED
@@ -1,18 +1,8 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 import json
-import uuid
-from PIL import Image
 from bs4 import BeautifulSoup
 import requests
-import random
-from transformers import LlavaProcessor, LlavaForConditionalGeneration, TextIteratorStreamer
-from threading import Thread
-import re
-import time
-import torch
-import cv2
-from gradio_client import Client, file

 def extract_text_from_webpage(html_content):
     soup = BeautifulSoup(html_content, 'html.parser')
@@ -22,13 +12,12 @@ def extract_text_from_webpage(html_content):

 def search(query):
     term = query
-    start = 0
     all_results = []
     max_chars_per_page = 8000
     with requests.Session() as session:
         resp = session.get(
             url="https://www.google.com/search",
-            headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64
+            headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"},
             params={"q": term, "num": 3, "udm": 14},
             timeout=5,
             verify=None,
@@ -40,7 +29,7 @@ def search(query):
             link = result.find("a", href=True)
             link = link["href"]
             try:
-                webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64
+                webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36"}, timeout=5, verify=False)
                 webpage.raise_for_status()
                 visible_text = extract_text_from_webpage(webpage.text)
                 if len(visible_text) > max_chars_per_page:
@@ -50,9 +39,7 @@ def search(query):
                 all_results.append({"link": link, "text": None})
     return all_results

-# Initialize inference clients for different models
 client_gemma = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
-client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
 client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")

 func_caller = []
@@ -93,7 +80,7 @@ def respond(message, history):
             web_results = search(query)
             gr.Info("Extracting relevant Info")
             web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results if res['text']])
-            messages = f"
+            messages = f"Web Dac uses the user agents of Mozilla, AppleWebKit, and Safari browsers for chat responses and human context mimicking."
             for msg in history:
                 messages += f"\nuser\n{str(msg[0])}"
                 messages += f"\nassistant\n{str(msg[1])}"
@@ -105,7 +92,7 @@ def respond(message, history):
                     output += response.token.text
                     yield output
         else:
-            messages = f"
+            messages = f"Web Dac uses the user agents of Mozilla, AppleWebKit, and Safari browsers for chat responses and human context mimicking."
             for msg in history:
                 messages += f"\nuser\n{str(msg[0])}"
                 messages += f"\nassistant\n{str(msg[1])}"
@@ -117,7 +104,7 @@ def respond(message, history):
                     output += response.token.text
                     yield output
     except:
-        messages = f"
+        messages = f"Web Dac uses the user agents of Mozilla, AppleWebKit, and Safari browsers for chat responses and human context mimicking."
         for msg in history:
             messages += f"\nuser\n{str(msg[0])}"
             messages += f"\nassistant\n{str(msg[1])}"
@@ -133,8 +120,8 @@ demo = gr.ChatInterface(
     fn=respond,
     chatbot=gr.Chatbot(show_copy_button=True, likeable=True, layout="panel"),
     description=" ",
-    textbox=gr.
-    multimodal=
+    textbox=gr.Textbox(),
+    multimodal=False,
     concurrency_limit=200,
 )
 demo.launch()