Tech-Meld committed on
Commit
1122788
·
verified ·
1 Parent(s): fa136e4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +103 -21
app.py CHANGED
@@ -1,34 +1,116 @@
# Pre-change version of app.py (the diff's "before" state), reconstructed
# from the mangled diff residue. Lazily loads the model on first chat call.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Cache so the model/tokenizer are downloaded and built only once.
model_cache = {}


def load_model():
    """Load and return the (model, tokenizer) pair for the chat checkpoint."""
    model_id = "Tech-Meld/Hajax_Chat_1.0"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    return model, tokenizer


def get_response(input_text, model, tokenizer):
    """Generate a reply for *input_text*, stripping the prompt tokens."""
    inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors='pt')
    outputs = model.generate(inputs, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens (everything past the prompt).
    response = tokenizer.decode(outputs[:, inputs.shape[-1]:][0], skip_special_tokens=True)
    return response


def chat(input_text):
    """Gradio entry point: lazily initialize the model, then answer."""
    global model_cache
    if "model" not in model_cache:
        model_cache["model"], model_cache["tokenizer"] = load_model()
    model = model_cache["model"]
    tokenizer = model_cache["tokenizer"]
    response = get_response(input_text, model, tokenizer)
    return response


iface = gr.Interface(
    chat,
    "text",
    "text",
    title="Chat with AI",
    description="Type your message and press Enter to chat with the AI.",
)
iface.launch()
 
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import time
import random

# Load the model and tokenizer eagerly, once, at startup; every request
# then reuses these module-level objects.
model_id = "Tech-Meld/Hajax_Chat_1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# --- Functions ---
 
 
 
 
def get_response(input_text, temperature, top_p, top_k, max_length):
    """Generate a model reply for *input_text*.

    Args:
        input_text: The user's message.
        temperature: Softmax temperature for sampling (UI range 0.1-1.0).
        top_p: Nucleus-sampling cumulative-probability cutoff.
        top_k: Number of highest-probability tokens kept for sampling.
        max_length: Maximum TOTAL sequence length (prompt + reply) passed
            to ``generate`` — not the reply length alone.

    Returns:
        The decoded reply text with the prompt tokens stripped off.
    """
    inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors='pt')
    outputs = model.generate(
        inputs,
        max_length=max_length,
        pad_token_id=tokenizer.eos_token_id,
        # Bug fix: without do_sample=True, generate() decodes greedily and
        # silently IGNORES temperature/top_p/top_k, so the UI sliders did
        # nothing. Sampling must be enabled for these knobs to take effect.
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )
    # Decode only the newly generated tokens (everything past the prompt).
    response = tokenizer.decode(outputs[:, inputs.shape[-1]:][0], skip_special_tokens=True)
    return response
25
 
def analyze_text(text):
    """Return basic size statistics for *text*: characters, words, tokens.

    Token count comes from the module-level ``tokenizer``, so it reflects
    the chat model's own vocabulary.
    """
    token_count = len(tokenizer.tokenize(text))
    stats = {
        "Number of characters": len(text),
        "Number of words": len(text.split()),
        "Number of tokens": token_count,
    }
    return stats
33
+
# --- Interface ---

css = """
.gradio-container {
    background-color: #f0f0f0; /* Light background for the container */
}

.gradio-interface {
    background-color: rgba(255, 255, 255, 0.8); /* Translucent white background */
    border-radius: 15px; /* Rounded corners */
    padding: 20px;
    box-shadow: 0 0 10px rgba(0, 0, 0, 0.2); /* Subtle shadow */
}

.gradio-button {
    background-color: #4CAF50; /* Green button color */
    color: white;
    border: none;
    padding: 10px 20px;
    text-align: center;
    text-decoration: none;
    display: inline-block;
    font-size: 16px;
    margin: 4px 2px;
    cursor: pointer;
    border-radius: 5px; /* Rounded corners */
}

.gradio-button:hover {
    background-color: #3e8e41; /* Darker green on hover */
}

.gradio-text-area {
    resize: vertical; /* Allow vertical resizing for text areas */
}
"""


def _chat(message, temperature, top_p, top_k, max_length):
    """UI adapter: produce one value per declared output component.

    Bug fix: the interface declares TWO outputs (reply TextArea + analysis
    Label) but was wired to a function returning a single string, which
    makes Gradio error on every submission. This wrapper returns the pair.
    """
    response = get_response(message, temperature, top_p, top_k, max_length)
    return response, analyze_text(response)


iface = gr.Interface(
    fn=_chat,
    inputs=[
        gr.Textbox(label="Your message:", lines=5, placeholder="Enter your message here...", show_label=True),
        gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7),
        gr.Slider(label="Top p", minimum=0.1, maximum=1.0, step=0.1, value=0.9),
        gr.Slider(label="Top k", minimum=1, maximum=100, step=1, value=50),
        gr.Slider(label="Max length", minimum=10, maximum=1000, step=10, value=250),
    ],
    outputs=[
        gr.TextArea(label="AI Response:", lines=10),
        gr.Label(label="Text Analysis", elem_id="analysis"),
    ],
    title="Chat with AI",
    description="Engage in a conversation with our advanced AI model. Customize the response using various parameters.",
    theme="default",
    css=css,  # Apply the CSS styles defined above
    # Bug fix: removed `layout="vertical"` — gr.Interface has no such
    # parameter in current Gradio and passing it raises TypeError.
    allow_flagging="never",
)
91
+
92
+ # --- Dynamic Background ---
93
+
94
+ def update_background():
95
+ while True:
96
+ r = random.randint(0, 255)
97
+ g = random.randint(0, 255)
98
+ b = random.randint(0, 255)
99
+ iface.root.style.background_color = f"rgb({r}, {g}, {b})" # Set dynamic background color
100
+ time.sleep(1) # Update every second
101
+
102
+ # Start a separate thread to update the background color
103
+ gr.Interface.update(update_background, inputs=[], outputs=[], live=True)
104
+
# --- Analysis Logic ---


def update_analysis(response):
    """Format analyze_text() statistics for *response* as a printable string.

    Bug fix: the original version called ``iface.update(...)`` (no such
    method — AttributeError) and the module then overwrote
    ``iface.outputs[0].postprocess`` with a None-returning callback, which
    would have blanked the reply TextArea. The interface already declares a
    Label output for the analysis, so this helper now only formats text and
    returns it; no monkey-patching of Gradio internals.
    """
    analysis = analyze_text(response)
    return (
        f"Number of characters: {analysis['Number of characters']}\n"
        f"Number of words: {analysis['Number of words']}\n"
        f"Number of tokens: {analysis['Number of tokens']}"
    )


iface.launch(debug=True)