import gradio as gr
from huggingface_hub import InferenceClient
import google.generativeai as genai
import numpy as np
import PIL.Image
import os
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
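# NOTE: `client` is only used by the commented-out Zephyr streaming path
# further down; the active code path goes through Gemini below.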
# Read the API key from the environment instead of hard-coding it in source
# (the original embedded a literal key here; `GOOGLE_API_KEY` is an assumed
# variable name, not one mandated by the library).
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
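# Set the key in your environment before launching, e.g.:
#   export GOOGLE_API_KEY="your-key-here"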
def respond(
    message,
    history: list[tuple[str, str]],
    image,
):
    # Flatten the chat history into a single text prompt for the vision model.
    messages = ""
    for user_msg, assistant_msg in history:
        if user_msg:
            messages += "{'role': 'user', 'content': " + user_msg + "}"
        if assistant_msg:
            messages += "{'role': 'assistant', 'content': " + assistant_msg + "}"
    messages += "{'role': 'user', 'content': " + message + "}"
    print(messages, image)
    # Normalize the image input: gr.Image yields a numpy array by default,
    # but also accept a file path or file-like object.
    if isinstance(image, np.ndarray):
        img = PIL.Image.fromarray(image)
    else:
        try:
            img = PIL.Image.open(image)
        except (AttributeError, IOError) as e:
            return f"Invalid image provided. Please provide a valid image file. Error: {e}"
    # Load the multimodal Gemini model. (The original called
    # generate_content a second time here, outside the try block,
    # which duplicated the request and bypassed the error handling.)
    model = genai.GenerativeModel("gemini-pro-vision")
    try:
        response = model.generate_content([messages, img])
        if not response or not response.text:
            return "No valid response received. The response might have been blocked."
        # Bold any line that looks like a heading (i.e., ends with a colon).
        formatted_response = ""
        for line in response.text.split("\n"):
            if line.strip().endswith(":"):
                formatted_response += f"**{line.strip()}**\n"
            else:
                formatted_response += line + "\n"
        return formatted_response
    except ValueError as e:
        return f"Error in generating response: {e}"
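    # Note: with google.generativeai, accessing `response.text` on a
    # safety-blocked response raises ValueError, so the except clause
    # above also covers blocked prompts.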
    # Previous text-only streaming path via the Hugging Face Inference API,
    # kept for reference:
    # response = ""
    # for message in client.chat_completion(
    #     messages,
    #     max_tokens=max_tokens,
    #     stream=True,
    #     temperature=temperature,
    #     top_p=top_p,
    # ):
    #     token = message.choices[0].delta.content
    #     response += token
    #     yield response
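    # NOTE: `max_tokens`, `temperature`, and `top_p` are not defined in the
    # current `respond` signature; they would need to be reintroduced as
    # parameters (or constants) to re-enable this streaming path.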
"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Image(show_label=False)
    ],
    additional_inputs_accordion=gr.Accordion(open=True),
    theme=gr.themes.Soft(),
    retry_btn=None,
    undo_btn=None,
    clear_btn=None,
    css="""#component-2{
        flex-direction: row !important;
    }
    #component-3{
        min-height: 100% !important;
        width: 50% !important;
    }
    #component-6{
        width: 50% !important;
        position: absolute !important;
        left: 50% !important;
        z-index: 100 !important;
        bottom: 45% !important;
    }
    #component-14{
        width: 50% !important;
        height: 50% !important;
    }""",
)
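# NOTE: the #component-N selectors above target auto-generated element IDs,
# which can shift between Gradio versions or layout changes; passing an
# explicit `elem_id` to each component would make this CSS more robust.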
if __name__ == "__main__":
    demo.launch()