Artificial-superintelligence committed on
Commit 6138a65 · verified · 1 Parent(s): c1b8194

Update app.py

Files changed (1)
  1. app.py +53 -4
app.py CHANGED
@@ -1,17 +1,66 @@
 import numpy as np
 import tensorflow as tf
 from tensorflow.keras.utils import to_categorical
+from tensorflow.keras.layers import Layer, Dense, Dropout, LayerNormalization, MultiHeadAttention
+from tensorflow.keras.models import Sequential
 from sklearn.preprocessing import LabelEncoder
 import gradio as gr
-
-# Load the saved model
-model = tf.keras.models.load_model("enhanced_adaptive_model.keras", custom_objects={'EnhancedTransformerBlock': EnhancedTransformerBlock})
+import os
+
+# Define the EnhancedTransformerBlock class
+class EnhancedTransformerBlock(Layer):
+    def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1, **kwargs):
+        super(EnhancedTransformerBlock, self).__init__(**kwargs)
+        self.embed_dim = embed_dim
+        self.num_heads = num_heads
+        self.ff_dim = ff_dim
+        self.rate = rate
+
+        self.att = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
+        self.ffn = Sequential([
+            Dense(ff_dim, activation="relu"),
+            Dense(embed_dim),
+        ])
+        self.layernorm1 = LayerNormalization(epsilon=1e-6)
+        self.layernorm2 = LayerNormalization(epsilon=1e-6)
+        self.dropout1 = Dropout(rate)
+        self.dropout2 = Dropout(rate)
+        self.self_attention = MultiHeadAttention(num_heads=1, key_dim=embed_dim)
+
+    def call(self, inputs, training=False):
+        attn_output = self.att(inputs, inputs)
+        attn_output = self.dropout1(attn_output, training=training)
+        out1 = self.layernorm1(inputs + attn_output)
+        ffn_output = self.ffn(out1)
+        ffn_output = self.dropout2(ffn_output, training=training)
+        out2 = self.layernorm2(out1 + ffn_output)
+        self_attn_output = self.self_attention(out2, out2)
+        return self.layernorm2(out2 + self_attn_output)
+
+    def get_config(self):
+        config = super().get_config()
+        config.update({
+            "embed_dim": self.embed_dim,
+            "num_heads": self.num_heads,
+            "ff_dim": self.ff_dim,
+            "rate": self.rate,
+        })
+        return config
 
 # Initialize global variables
 sequence_length = 10
 data = [] # This will store the recent outcomes
 encoder = LabelEncoder()
-encoder.classes_ = np.load('label_encoder_classes.npy', allow_pickle=True)
+
+# Try to load the saved model and encoder classes
+try:
+    model = tf.keras.models.load_model("enhanced_adaptive_model.keras", custom_objects={'EnhancedTransformerBlock': EnhancedTransformerBlock})
+    encoder.classes_ = np.load('label_encoder_classes.npy', allow_pickle=True)
+except FileNotFoundError:
+    print("Model or encoder classes file not found. Using a dummy model and encoder for demonstration.")
+    # Create a dummy model and encoder for demonstration
+    model = tf.keras.Sequential([tf.keras.layers.Dense(10, input_shape=(sequence_length,), activation='softmax')])
+    encoder.classes_ = np.array(['Single small', 'Single big', 'Double small', 'Double big', 'Triple'])
 
 def update_data(data, new_outcome):
     data.append(new_outcome)
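
Note (not part of the commit): the get_config override added above is what allows tf.keras.models.load_model to rebuild the custom layer from a saved .keras file, provided the class is supplied through custom_objects, as the new try block does. Below is a minimal sketch of that save/load round trip, assuming the EnhancedTransformerBlock definition from app.py is in scope, a recent TensorFlow version that supports the .keras format, and a hypothetical demo_model.keras path chosen only for illustration.

import numpy as np
import tensorflow as tf

# Build a tiny model around the custom layer (shapes chosen only for illustration).
inputs = tf.keras.Input(shape=(10, 16))
outputs = EnhancedTransformerBlock(embed_dim=16, num_heads=2, ff_dim=32)(inputs)
demo = tf.keras.Model(inputs, outputs)

# Save, then reload; custom_objects tells Keras how to reconstruct the layer
# from the config dictionary returned by get_config.
demo.save("demo_model.keras")  # hypothetical path, not the app's model file
restored = tf.keras.models.load_model(
    "demo_model.keras",
    custom_objects={"EnhancedTransformerBlock": EnhancedTransformerBlock},
)

# The restored layer should produce outputs of the same shape as the original.
print(restored(np.zeros((1, 10, 16), dtype="float32")).shape)  # expected: (1, 10, 16)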