import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from datetime import datetime

# Load the GPT-2 model and tokenizer for finer control

model_name = "gpt2" model = GPT2LMHeadModel.from_pretrained(model_name) tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# Put the model in evaluation mode (disables dropout)

model.eval()

# Function that keeps the conversation history within the token limit

def manage_token_limit(history, max_tokens=1000):
    # Check whether the history has grown too large
    tokenized_history = tokenizer.encode(history)
    if len(tokenized_history) > max_tokens:
        # Keep only the most recent max_tokens tokens
        return tokenizer.decode(tokenized_history[-max_tokens:])
    return history
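# Sketch (an addition, not part of the original app): token-level trimming can
# cut the history mid-turn. This variant drops whole lines from the front so
# the surviving "User:"/"AI:" turns stay intact.
def manage_token_limit_by_turns(history, max_tokens=1000):
    lines = history.split("\n")
    # Drop the oldest lines until the remainder fits within the token budget
    while len(lines) > 1 and len(tokenizer.encode("\n".join(lines))) > max_tokens:
        lines.pop(0)
    return "\n".join(lines)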

# Function to generate an AI response with context

def generate_response(user_input, chat_history, temperature=0.7, top_k=50, top_p=0.9, max_new_tokens=100):
    # Append the user input to the history
    new_history = chat_history + f"\nUser: {user_input}\nAI:"

    # Trim the history if it has grown too long
    new_history = manage_token_limit(new_history)

    # Tokenize the history
    inputs = tokenizer.encode(new_history, return_tensors="pt")

    # Generate text; do_sample=True is required for temperature/top-k/top-p to
    # have any effect, and max_new_tokens (unlike max_length) does not count
    # the prompt, so a long history still leaves room for an answer
    outputs = model.generate(inputs, max_new_tokens=max_new_tokens, do_sample=True,
                             temperature=temperature, top_k=int(top_k), top_p=top_p,
                             pad_token_id=tokenizer.eos_token_id)

    # Decode only the newly generated tokens and append them to the history
    generated_text = tokenizer.decode(outputs[0, inputs.shape[-1]:], skip_special_tokens=True)

    new_history += generated_text + "\n"

    return generated_text, new_history
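# Sketch (an assumption, not part of the original app): GPT-2 often keeps
# writing past its own turn and starts a new "User:" line itself. Clipping the
# reply at the first such marker keeps only the model's turn; callers could
# apply this to generated_text before displaying it.
def clip_to_single_turn(generated_text):
    return generated_text.split("\nUser:")[0].strip()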

# Function for logging conversations

def log_conversation(user_input, response):
    # Simple logging to a file
    with open("chat_logs.txt", "a") as log_file:
        log_file.write(f"{datetime.now()} | User: {user_input} | AI: {response}\n")
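# Sketch (an assumption): a JSON-lines logger is easier to parse back than the
# pipe-separated format above; the filename chat_logs.jsonl is illustrative.
import json

def log_conversation_jsonl(user_input, response, path="chat_logs.jsonl"):
    record = {"time": datetime.now().isoformat(), "user": user_input, "ai": response}
    with open(path, "a") as log_file:
        log_file.write(json.dumps(record) + "\n")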

# Gradio interface function that handles interaction and settings

def chatbot_interface(user_input, chat_history, temperature=0.7, top_k=50, top_p=0.9):
    # Generate the AI response
    ai_response, updated_history = generate_response(user_input, chat_history, temperature, top_k, top_p)

    # Log the conversation
    log_conversation(user_input, ai_response)

    return ai_response, updated_history

# Gradio UI setup

with gr.Blocks() as demo:
    # Title and description
    gr.Markdown("# Advanced AI Chatbot with Variation")
    gr.Markdown("This chatbot uses GPT-2 to generate advanced, varied answers.")

    # Input field and conversation history
    chat_history = gr.State(value="")  # Keeps the full history

    with gr.Row():
        user_input = gr.Textbox(lines=2, placeholder="Type your question here...")

    # Settings for response variation
    with gr.Row():
        temperature = gr.Slider(0.1, 1.0, value=0.7, label="Temperature (creativity)")
        top_k = gr.Slider(1, 100, value=50, step=1, label="Top-k sampling")
        top_p = gr.Slider(0.1, 1.0, value=0.9, label="Top-p sampling")

    # Output field for the AI answer
    ai_output = gr.Textbox(label="AI Response")

    # Wire up the chatbot
    submit_button = gr.Button("Submit")
    submit_button.click(chatbot_interface,
                        inputs=[user_input, chat_history, temperature, top_k, top_p],
                        outputs=[ai_output, chat_history])

    # Reset button: clear both the stored history and the visible answer
    reset_button = gr.Button("Reset Chat")
    reset_button.click(lambda: ("", ""), outputs=[chat_history, ai_output])

# Launch the Gradio interface

demo.launch()
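# Sketch (an assumption): with several simultaneous users, calling demo.queue()
# before demo.launch() serializes requests so generations on the single model
# don't overlap; demo.launch(share=True) would additionally expose a temporary
# public URL.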
