Spaces:
Running
Running
File size: 1,547 Bytes
a936419 085ef0b 1605c68 9371b38 d0645f3 54f37fd 9371b38 a936419 085ef0b 54f37fd 9371b38 0963c3d 085ef0b cb7bc65 6ad3993 cb7bc65 ee8bb54 cb7bc65 9371b38 cb7bc65 79b0e5e 54f37fd 79b0e5e 54f37fd 79b0e5e c3b4363 79b0e5e 5b85257 79b0e5e 5878e17 54f37fd e6bff66 085ef0b 79b0e5e 85deaff 9371b38 5399f24 9371b38 085ef0b 9371b38 e5d9b98 085ef0b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
import gradio as gr
import os
import google.generativeai as genai
import logging
import time
#import backoff
# Configure Logging
# Only errors are emitted; format includes timestamp and level for triage.
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s - %(message)s')
# Load environment variables
# Gemini API key is mandatory: a missing 'geminiapikey' env var raises KeyError here at import time.
genai.configure(api_key=os.environ["geminiapikey"])
# Optional Hugging Face token; not referenced elsewhere in this file —
# presumably kept for gated HF resources. TODO confirm it is still needed.
read_key = os.environ.get('HF_TOKEN', None)
custom_css = """
#md {
height: 400px;
font-size: 30px;
background: #202020;
padding: 20px;
color: white;
border: 1px solid white;
}
"""
def predict(prompt):
    """Send *prompt* to Gemini 1.5 Pro with Google Search grounding and return the answer.

    Args:
        prompt: The user's free-form question as a plain string.

    Returns:
        The generated answer text (``response.text``).

    Raises:
        KeyError/ValueError: presumably raised by the SDK when the response
            is blocked or empty — TODO confirm against google-generativeai docs.
    """
    # Bounded, moderately deterministic sampling settings.
    generation_config = {
        "temperature": 0.4,
        "top_p": 0.95,
        "top_k": 40,
        "max_output_tokens": 2048,
        "response_mime_type": "text/plain",
    }
    # NOTE(review): the model wrapper is rebuilt on every call. It is a
    # client-side object (no network cost shown here), but it could be
    # hoisted to module level if profiling shows it matters.
    model = genai.GenerativeModel(
        model_name="gemini-1.5-pro",
        generation_config=generation_config,
    )
    # A bare string is accepted as content; 'google_search_retrieval'
    # enables Google Search grounding for up-to-date answers.
    response = model.generate_content(
        contents=[prompt],
        tools='google_search_retrieval',
    )
    return response.text
# Create the Gradio interface: answer panel on top, prompt box and
# submit button below it.
with gr.Blocks(css=custom_css) as demo:
    with gr.Row():
        details_output = gr.Markdown(label="answer", elem_id="md")
    with gr.Row():
        ort_input = gr.Textbox(label="prompt", placeholder="ask anything...")
    with gr.Row():
        button = gr.Button("Senden")

    # Connect the button to the function: clicking sends the prompt text
    # to predict() and renders the returned answer as markdown.
    button.click(fn=predict, inputs=ort_input, outputs=details_output)

# Launch the Gradio application (blocking call; serves the web UI).
demo.launch()