import gradio as gr
import requests
import os
import json
import google.generativeai as genai
from bs4 import BeautifulSoup
from google.ai.generativelanguage_v1beta.types import content
from IPython.display import display
from IPython.display import Markdown
# from groq import Groq

# Load environment variables / API credentials.
genai.configure(api_key=os.environ["geminiapikey"])
read_key = os.environ.get('HF_TOKEN', None)

# Google Programmable Search Engine ID used by websearch().
cx = "77f1602c0ff764edb"

custom_css = """
#md {
    height: 400px;
    font-size: 30px;
    background: #121212;
    padding: 20px;
    color: white;
    border: 1px solid white;
}
"""
# NOTE: fixed "border: 1 px" -> "1px" — a space between number and unit is invalid CSS.

# Default Gemini generation settings (used by ground_search()).
generation_config = {
    "temperature": 0.3,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}


def ground_search(prompt):
    """Answer *prompt* with a Gemini model that has Google Search grounding enabled.

    Returns the model's plain-text answer.
    """
    model = genai.GenerativeModel(
        model_name="gemini-2.0-pro-exp-02-05",
        generation_config=generation_config,
        tools=[
            genai.protos.Tool(
                google_search=genai.protos.Tool.GoogleSearch(),
            ),
        ],
    )
    # Seed the chat with an empty user/model exchange, as in the original.
    chat_session = model.start_chat(
        history=[
            {"role": "user", "parts": [""]},
            {"role": "model", "parts": [""]},
        ]
    )
    response = chat_session.send_message(f"{prompt}")
    # print(response.text)
    return response.text


# api_key = os.getenv('groq')
google_api_key = os.getenv('google_search')

# API_URL = "https://blavken-flowiseblav.hf.space/api/v1/prediction/fbc118dc-ec00-4b59-acff-600648958be3"


def query(payload):
    """Fetch the Bing search results page for *payload* and return its HTML text.

    FIX: previously returned the ``requests.Response`` object itself, so the
    Gradio Markdown output (and the f-string in ``querys``) received the repr
    ``<Response [200]>`` instead of the page content.
    """
    API_URL = f"https://www.bing.com/search?q={payload}"
    response = requests.get(API_URL)
    return response.text


def querys(payloads):
    """Search Bing for *payloads* and ask the LLM for a short German answer."""
    output = query(payloads)
    print(output)
    # Formuliere die Antwort (compose the answer prompt; instructions are
    # runtime strings and intentionally kept in German).
    search_query = f"{payloads} antworte kurz und knapp. antworte auf deutsch. \ndu findest die antwort hier:\n {output}"
    result = predict(search_query)
    # NOTE: removed unreachable dead code that followed the return here
    # (a loop concatenating `output` into `texte` that could never run).
    return result


# very simple (and extremely fast) websearch
def websearch(prompt):
    """Answer *prompt* in German using Google Custom Search snippets + Gemini.

    Queries the Custom Search JSON API, joins the result snippets, and feeds
    them to predict() as grounding context.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    url = f"https://www.googleapis.com/customsearch/v1?key={google_api_key}&cx={cx}&q={prompt}"
    response = requests.get(url, headers=headers)
    data = response.json()  # process the JSON payload directly

    # Extract the snippet text from each search-result item.
    items = data.get('items', [])
    results = [item['snippet'] for item in items]
    result_text = '\n'.join(results)

    # Formuliere die Antwort (runtime prompt kept in German).
    search_query = f"{prompt} antworte kurz und knapp. antworte auf deutsch. du findest die antwort hier: {result_text}"
    result = predict(search_query)
    display(Markdown(result))  # only has a visible effect inside a notebook
    # NOTE: removed two unreachable `return` statements (result_text, results)
    # that followed this return.
    return result


def predict(prompt):
    """Send *prompt* to Gemini Flash and return the plain-text answer.

    Appends an instruction (in German, as a runtime string) telling the model
    to always answer in German.
    """
    generation_config = {
        "temperature": 0.4,
        "top_p": 0.95,
        "top_k": 40,
        "max_output_tokens": 8192,
        "response_mime_type": "text/plain",
    }
    model = genai.GenerativeModel(
        model_name="gemini-2.0-flash-exp",
        generation_config=generation_config,
    )
    chat_session = model.start_chat(history=[])
    response = chat_session.send_message(f"{prompt}\n antworte immer auf deutsch")
    # Take the text of the first candidate's first content part.
    response_value = response.candidates[0].content.parts[0].text
    return response_value


# Create the Gradio interface
with gr.Blocks(css=custom_css) as demo:
    with gr.Row():
        details_output = gr.Markdown(label="answer", elem_id="md")
        # details_output = gr.Textbox(label="Ausgabe", value=f"\n\n\n\n")
    with gr.Row():
        ort_input = gr.Textbox(label="prompt", placeholder="ask anything...")
        # audio_input = gr.Microphone(type="filepath")
    with gr.Row():
        button = gr.Button("Senden")

    # Connect the button to the function.
    # NOTE(review): `query` now returns the raw Bing HTML; `querys` or
    # `websearch` would render a summarized LLM answer instead — confirm
    # which handler is intended here.
    button.click(fn=query, inputs=ort_input, outputs=details_output)

# Launch the Gradio application
demo.launch()