Spaces:
Running
Running
File size: 4,968 Bytes
a936419 40ce5ac 303a5ba 085ef0b 40ce5ac 1605c68 eb28548 3b9d20d 8047bc1 abae114 085ef0b 40ce5ac 0963c3d cd1062c 085ef0b cb7bc65 6ad3993 cb7bc65 097823d cb7bc65 40ce5ac cb7bc65 ee3485c 3b9d20d 2dde674 3b9d20d 7145d29 cc6919c 7145d29 b09ae24 6051823 1903165 303a5ba 08826a1 1903165 7145d29 3b9d20d abae114 cd1062c a29fe08 4c8ca04 a160b6a 2dde674 82d3b53 2dde674 9ba2efc 4c8ca04 9f192ab 3b77f4e 06a04a6 be5340a ddaa39f 6c48f7f be5340a 0a61873 cb954b3 d1e8811 f227cbb 883fe4b 72701df 883fe4b 72701df d1e8811 45616e1 8047bc1 759417d 45616e1 f681054 bc9f82a 0a61873 ee3485c 51f5b3e ee3485c 6578e3e eb28548 c230eb4 0a61873 085ef0b 79b0e5e 85deaff 7a70be2 5399f24 75cc043 573de21 40ce5ac 085ef0b 7a70be2 e5d9b98 085ef0b 45616e1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 |
import gradio as gr
import requests
import re
import os
import json
import google.generativeai as genai
from bs4 import BeautifulSoup
from google.ai.generativelanguage_v1beta.types import content
from IPython.display import display
from IPython.display import Markdown
#from groq import Groq
# Load environment variables
# Configure Gemini; os.environ[...] raises KeyError if the key is missing,
# which fails fast at startup rather than at first request.
genai.configure(api_key=os.environ["geminiapikey"])
# Optional Hugging Face token (None when unset; not used below in this view).
read_key = os.environ.get('HF_TOKEN', None)
# Google Programmable Search Engine ID used by websearch().
cx="77f1602c0ff764edb"
# CSS for the Markdown answer pane (element id "md", see the Gradio UI below).
custom_css = """
#md {
height: 400px;
font-size: 30px;
background: #121212;
padding: 20px;
color: white;
border: 1px solid white;
}
"""
# Default sampling parameters for the search-grounded Gemini model
# (ground_search); predict() builds its own, slightly different config.
generation_config = dict(
    temperature=0.3,
    top_p=0.95,
    top_k=64,
    max_output_tokens=8192,
    response_mime_type="text/plain",
)
def ground_search(prompt):
    """Answer *prompt* with a Gemini model that has Google Search grounding.

    Uses the module-level `generation_config`. Returns the model's text
    response; API/network errors propagate to the caller.
    """
    model = genai.GenerativeModel(
        model_name="gemini-2.0-pro-exp-02-05",
        generation_config=generation_config,
        tools=[
            genai.protos.Tool(
                google_search=genai.protos.Tool.GoogleSearch(),
            ),
        ],
    )
    # Fix: start from an empty history. The original seeded the chat with one
    # user turn and one model turn whose parts were empty strings, which only
    # padded every request with useless placeholder content.
    chat_session = model.start_chat(history=[])
    response = chat_session.send_message(f"{prompt}")
    return response.text
def duckduckgo(search_term):
    """Fetch the DuckDuckGo result page for 'impressum <search_term>'.

    Returns the page text with HTML tags stripped, or an empty string if the
    HTTP request fails.
    """
    url = f"https://duckduckgo.com/?q=impressum+{search_term}&ia=web"
    try:
        response = requests.get(url, timeout=30)
        # Strip HTML tags with a regex. The original computed this stripped
        # text (s2) but then returned the raw HTML (s1) by mistake.
        return re.sub(r"<.*?>", "", response.text)
    except requests.exceptions.RequestException as e:
        # Fix: the original returned `response.text` here, which raises
        # NameError when requests.get() itself failed and `response` was
        # never bound.
        print(f"An error occurred: {e}")
        return ""
#api_key = os.getenv('groq')
# API key for the Google Custom Search JSON API (consumed by websearch();
# None when the 'google_search' secret is not set).
google_api_key = os.getenv('google_search')
#API_URL = "https://blavken-flowiseblav.hf.space/api/v1/prediction/fbc118dc-ec00-4b59-acff-600648958be3"
def query(payload):
    """POST *payload* to the specialist-it.de bot endpoint and return the body.

    Returns the response text on success, or an empty string if the request
    fails (connection error or non-2xx status).
    """
    from urllib.parse import quote  # stdlib; local import keeps the file header unchanged
    # Percent-encode the payload so spaces/special characters survive the URL.
    API_URL = f"https://specialist-it.de/bots.php?json={quote(str(payload))}"
    try:
        response = requests.post(API_URL, timeout=60)
        response.raise_for_status()  # Raises HTTPError for bad responses
        return response.text  # Return the content of the response
    except requests.exceptions.RequestException as e:
        # Fix: the original returned `response.text` here, which raises
        # NameError when requests.post() itself failed before binding
        # `response`.
        print(f"An error occurred: {e}")
        return ""
def querys(payloads):
    """Ask the remote bot endpoint about *payloads*, then have Gemini phrase a
    short German answer from the endpoint's raw output.
    """
    output = query(payloads)
    # Embed the raw endpoint output as context for the LLM.
    # (Fix: removed a dead loop that concatenated `output` character by
    # character into an unused `texte` variable, plus a debug print.)
    search_query = (
        f"{payloads} antworte kurz und knapp. antworte auf deutsch. "
        f"du findest die antwort hier:\n {output}"
    )
    return predict(search_query)
#very simple (and extremly fast) websearch
def websearch(prompt):
    """Run a Google Custom Search for *prompt* and return a short German
    answer generated by Gemini from the result snippets.

    Requires the module-level `google_api_key` and `cx`. Network/JSON errors
    propagate to the caller (Gradio surfaces them in the UI).
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    from urllib.parse import quote  # stdlib; percent-encode the user's prompt for the URL
    url = (
        "https://www.googleapis.com/customsearch/v1"
        f"?key={google_api_key}&cx={cx}&q={quote(str(prompt))}"
    )
    response = requests.get(url, headers=headers, timeout=30)
    data = response.json()  # JSON-Daten direkt verarbeiten
    # Join the snippet of every result into one context string; tolerate
    # items that lack a 'snippet' key (fix: original used item['snippet']).
    snippets = [item.get('snippet', '') for item in data.get('items', [])]
    result_text = '\n'.join(snippets)
    # Formuliere die Antwort
    search_query = f"{prompt} antworte kurz und knapp. antworte auf deutsch. du findest die antwort hier: {result_text}"
    # Fix: dropped display(Markdown(...)) — IPython-only, a no-op/error inside
    # a Gradio app — and two unreachable return statements that followed the
    # first `return`.
    return predict(search_query)
def predict(prompt):
    """Send *prompt* to Gemini (flash) and return the generated text.

    A German-language instruction is appended to every prompt; the first text
    part of the first candidate is returned.
    """
    # Sampling settings for this call (separate from the module-level config).
    config = {
        "temperature": 0.4,
        "top_p": 0.95,
        "top_k": 40,
        "max_output_tokens": 8192,
        "response_mime_type": "text/plain",
    }
    gemini = genai.GenerativeModel(
        model_name="gemini-2.0-flash-exp",
        generation_config=config,
    )
    chat = gemini.start_chat(history=[])
    reply = chat.send_message(f"{prompt}\n antworte immer auf deutsch")
    return reply.candidates[0].content.parts[0].text
# Create the Gradio interface
with gr.Blocks(css=custom_css) as demo:
    with gr.Row():
        # Markdown pane showing the answer (styled via #md in custom_css).
        details_output = gr.Markdown(label="answer", elem_id="md")
    with gr.Row():
        ort_input = gr.Textbox(label="prompt", placeholder="ask anything...")
    with gr.Row():
        button = gr.Button("Senden")
    # Connect the button to the search-and-answer pipeline.
    button.click(fn=websearch, inputs=ort_input, outputs=details_output)
# Launch the Gradio application
# (Fix: removed a stray trailing '|' after demo.launch() — file-viewer
# residue that made the script a SyntaxError.)
demo.launch()