import gradio as gr
from huggingface_hub import InferenceClient
import PyPDF2
from PIL import Image
import cv2
import numpy as np
from pydub import AudioSegment
from langdetect import detect
from gtts import gTTS
import os
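
# Runtime note: pydub needs ffmpeg available on the PATH to decode mp3/ogg files,
# and cv2 is provided by the opencv-python package.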

# Hugging Face inference client for the Zephyr-7B chat model
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def process_pdf(file):
    """Extract plain text from every page of an uploaded PDF."""
    pdf_reader = PyPDF2.PdfReader(file)
    text = ""
    for page in pdf_reader.pages:
        text += page.extract_text() or ""
    return text


def process_image(file):
    """Describe an uploaded image (dimensions and format)."""
    image = Image.open(file)
    return f"Изображение: {image.size[0]}x{image.size[1]} пикселей, формат: {image.format}"


def process_video(file):
    """Report the duration and frame count of an uploaded video."""
    cap = cv2.VideoCapture(file.name)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    duration = frame_count / cap.get(cv2.CAP_PROP_FPS)
    cap.release()
    return f"Видео: длительность {duration:.2f} секунд, {frame_count} кадров"


def process_audio(file):
    """Report the duration and sample rate of an uploaded audio file."""
    audio = AudioSegment.from_file(file)
    return f"Аудио: длительность {len(audio) / 1000:.2f} секунд, частота {audio.frame_rate} Гц"


def detect_language(text):
    """Detect the language of the text, falling back to English on failure."""
    try:
        return detect(text)
    except Exception:
        return "en"


def text_to_speech(text, language):
    """Synthesize speech for the response with gTTS and return the mp3 path."""
    tts = gTTS(text=text, lang=language, slow=False)
    audio_file = "output.mp3"
    tts.save(audio_file)
    return audio_file


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    file=None,
):
    # If a file was attached, summarize it and append the summary to the user message.
    if file is not None:
        file_type = file.name.split(".")[-1].lower()
        if file_type == "pdf":
            file_info = process_pdf(file)
        elif file_type in ["jpg", "jpeg", "png", "bmp", "gif"]:
            file_info = process_image(file)
        elif file_type in ["mp4", "avi", "mov"]:
            file_info = process_video(file)
        elif file_type in ["mp3", "wav", "ogg"]:
            file_info = process_audio(file)
        else:
            file_info = "Неизвестный тип файла"
        message += f"\n[Пользователь загрузил файл: {file.name}]\n{file_info}"

    # The system message passed in from the UI is overridden based on the
    # detected language, so the bot always answers in the user's language.
    language = detect_language(message)
    if language == "ru":
        system_message = "Вы дружелюбный чат-бот, который понимает русский язык."
    else:
        system_message = "You are a friendly chatbot."

    messages = [{"role": "system", "content": system_message}]

    # Rebuild the conversation as role/content messages for chat_completion.
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    # Stream the completion so the UI updates token by token.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
        yield response

    # Generate speech for the finished reply. Note that gr.ChatInterface only
    # consumes the strings yielded by a generator, so this trailing return
    # value is not surfaced in the UI.
    if response:
        audio_file = text_to_speech(response, language)
        return response, gr.Audio.update(value=audio_file, visible=True)
    else:
        return response, gr.Audio.update(visible=False)


def reset_chat():
    """Clear the chatbot history."""
    return []


with gr.Blocks() as demo:
    gr.Markdown("# Felguk v0")
    gr.Markdown("Чат-бот Felguk v0. Отвечает на том же языке, на котором вы написали. Задавайте вопросы и загружайте файлы (PDF, изображения, видео, аудио)!")

    with gr.Row():
        new_chat_button = gr.Button("Новый чат", variant="secondary")

    chat_interface = gr.ChatInterface(
        respond,
        additional_inputs=[
            gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.95,
                step=0.05,
                label="Top-p (nucleus sampling)",
            ),
            gr.File(label="Загрузите файл (опционально)"),
        ],
    )

    audio_player = gr.Audio(label="Reader", visible=False)

    # "Новый чат" (new chat) clears the chat history.
    new_chat_button.click(fn=reset_chat, outputs=chat_interface.chatbot)

    # Show the audio player only while there is chat history to read out.
    chat_interface.chatbot.change(
        fn=lambda response: (response, gr.Audio.update(visible=bool(response))),
        inputs=chat_interface.chatbot,
        outputs=[chat_interface.chatbot, audio_player],
    )


if __name__ == "__main__":
    demo.launch()