Bernd-Ebenhoch committed on
Commit
bb16116
·
1 Parent(s): 84e7348

Update app.py

Files changed (1)
  1. app.py +90 -372
app.py CHANGED
@@ -6,410 +6,128 @@ Created on Sun Mar 26 21:07:00 2023
  """


- import tensorflow as tf
  import numpy as np
  import matplotlib.pyplot as plt
- from matplotlib import animation
- from matplotlib.animation import FuncAnimation
- import matplotlib as mpl
  import streamlit as st
- from matplotlib import cm
-
- import matplotlib.pyplot as plt
- from sklearn.linear_model import LinearRegression
- import mpl_toolkits.mplot3d as a3
- import matplotlib.colors as colors
- from matplotlib.colors import LightSource
- from tensorflow import keras
- import pandas as pd
-
- from transformers import pipeline
- import transformers
-
-
- # Define colors
- cb = [15/255, 25/255, 35/255]
- cf = [25/255*2, 35/255*2, 45/255*2]
- w = [242/255, 242/255, 242/255]
- blue = [68/255, 114/255, 196/255]
- orange = [197/255, 90/255, 17/255]
-
-
- # Define pipelines
- # en_de_translator = pipeline("translation_de_to_en", model='google/bert2bert_L-24_wmt_de_en')
- qa_pipeline = pipeline("question-answering", model='deepset/gelectra-base-germanquad')
- sentiment = pipeline("text-classification", model='oliverguhr/german-sentiment-bert')
-
- tab1, tab2, tab3, tab4 = st.tabs(
-     ["Künstliche Neuronale Netze", "Wortvektoren Stimmung", "Wörter Maskieren", "HuggingFace Pipelines"])
-
-
- with tab1:
-     st.markdown(
-         'Definieren Sie ein neuronales Netz und beobachten Sie wie sich die Kurve krümmen kann, um die Daten zu fitten')
-
-     col1, col2 = tab1.columns(2)
-     size = np.array([12., 27., 32., 47., 58., 56., 58., 61.,
-                      64., 67., 70., 80., 84., 88., 108.])
-     price = np.array([88., 135., 178., 216., 220., 246., 241., 275.,
-                       305., 267., 297., 310., 292., 317., 422.])
-     location = np.array([2., 2., 0., 1., 2., 0., 1., 0., 1., 2., 0., 2., 1., 1., 2.])
-     price[location == 1] = price[location == 1]*1+30
-     price[location == 2] = price[location == 2]*1+60
-
-     size_location = np.concatenate((size.reshape(-1, 1), location.reshape(-1, 1)), axis=1)
-
-     data = np.concatenate((size.reshape(-1, 1), location.reshape(-1, 1),
-                            price.reshape(-1, 1)), axis=1)
-     data = pd.DataFrame(data, columns=['Wohnungsgröße (qm)', 'Ort', 'Preis (k€)'])
-
-     col1.dataframe(data.style.format(precision=0))
-     # edited_df = st.experimental_data_editor(data)
-     edited_df = data
-
-     edited_data = edited_df.to_numpy()
-     size_location = edited_data[:, :2]
-     price = edited_data[:, 2]
-
-     string = col2.text_area(
-         'Architektur des neuronalen Netzes. Anzahl der Neuronen in den verdeckten Schichten', value='4', height=275)
-     layers = string.split('\n')
-
-     if st.button('Modell trainieren und Fit-Kurve darstellen'):
-
-         with st.spinner('Der Fit-Prozess kann einige Sekunden dauern ...'):
-
-             model = keras.models.Sequential()
-
-             if len(layers) > 0:
-                 for neurons in layers:
-                     model.add(keras.layers.Dense(int(neurons), activation='tanh'))
-             model.add(keras.layers.Dense(1, activation='tanh'))
-
-             model.compile(loss='binary_crossentropy', optimizer='SGD')
-
-             lr_reduction = keras.callbacks.ReduceLROnPlateau(
-                 monitor='loss', patience=1000, min_lr=0.00001)
-
-             model.fit(size_location/[120, 2], price/500, epochs=5000,
-                       batch_size=4, callbacks=lr_reduction, verbose=False)
-
-             y_pred = model.predict((size_location)/[120, 2], verbose=False).reshape(-1)*500
-
-             x = np.linspace(0, 125, 400)
-             y = np.linspace(0, 2, 400)
-             X, Y = np.meshgrid(x, y)
-
-             Z = np.concatenate([X.reshape(-1, 1)/120, Y.reshape(-1, 1)/2], axis=1)
-             Z = model.predict(Z, verbose=False)*500
-
-             Z = Z.reshape(len(y), len(x))
-
-             fig = plt.figure(facecolor=cb, figsize=(7, 7))
-             ax = fig.add_subplot(projection='3d')
-             ax.tick_params(color=w, labelcolor=w, labelsize=12)
-             ax.set_facecolor(cb)
-
-             ax.w_xaxis.set_pane_color(cf)
-             ax.w_yaxis.set_pane_color(cf)
-             ax.w_zaxis.set_pane_color(cf)
-
-             ax.set_yticks([0, 1, 2])
-             ax.view_init(25, 50)
-
-             rgb = np.tile(orange, (Z.shape[0], Z.shape[1], 1))
-
-             ls = LightSource(azdeg=315, altdeg=45, hsv_min_val=0.9,
-                              hsv_max_val=1, hsv_min_sat=1, hsv_max_sat=0)
-             illuminated_surface = ls.shade_rgb(rgb, Z)
-
-             below_price = price[price < y_pred]
-             below_location = location[price < y_pred]
-             below_size = size[price < y_pred]
-
-             ax.plot(below_size, below_location, below_price, '.', markersize=20, color=blue)
-
-             ax.plot_surface(X, Y, Z, facecolors=illuminated_surface, edgecolors=[0, 0, 0, 0],
-                             linewidth=0, antialiased=True, rcount=400, ccount=400, alpha=0.8)
-
-             above_price = price[price >= y_pred]
-             above_location = location[price >= y_pred]
-             above_size = size[price >= y_pred]
-
-             ax.plot(above_size, above_location, above_price,
-                     '.', markersize=20, color=blue, zorder=20)
-
-             ax.set_ylim(2, 0)
-             ax.set_xlim(125, 0)
-             ax.set_zlim(0, 450)
-
-             ax.set_xlabel('Wohnungsgröße (qm)', color=w, fontsize=15, labelpad=10)
-             ax.set_ylabel('Ort', color=w, fontsize=15, labelpad=10)
-             ax.set_zlabel('Preis (k€)', color=w, fontsize=15, rotation=270, labelpad=10)
-
-             st.pyplot(fig)
-
-
- # %%
- with tab2:
-
-     st.markdown(
-         'Definieren Sie Sätze, die positiv oder negativ gestimmt sind und beobachten Sie die resultierenden Wort-Vektoren.')
-
-     text_input_2 = '1: Das schöne Allgäu\n' + \
-         '1: So toll hier im Allgäu\n' + \
-         '1: Uns gefallen die Berge und Seen\n' + \
-         '1: Wir mögen die Landschaft und die Berge\n' + \
-         '1: Ganz toll im Allgäu\n' + \
-         '1: Wir mögen das Allgäu\n' + \
-         '0: Uns gefiel es leider nicht\n' + \
-         '0: Bei Regen ist es total langweilig\n' + \
-         '0: Ganz langweilig!\n' + \
-         '0: So schade, dass es oft Regen gibt\n' + \
-         '0: Sehr schade, wir konnten gar nicht skifahren\n' + \
-         '0: Das gefiel uns überhaupt nicht'
-
-     string_2 = st.text_area('', value=text_input_2, height=275)
-     texts_2 = string_2.split('\n')
-
-     text = []
-     labels = []
-     for element in texts_2:
-         if element != '':
-             label_element, text_element = element.split(':')
-             text.append(text_element)
-             labels.append(float(label_element))
-
-     if st.button('Modell trainieren und Wort-Vektoren darstellen', key=1):
-         with st.spinner('Der Fit-Prozess kann einige Sekunden dauern ...'):
-
-             vectorizer = tf.keras.layers.TextVectorization(
-                 max_tokens=1000, output_sequence_length=7)
-
-             vectorizer.adapt(text)
-
-             model = tf.keras.models.Sequential()
-             model.add(vectorizer)
-
-             model.add(tf.keras.layers.Embedding(vectorizer.vocabulary_size(), 2))
-             # model.add(tf.keras.layers.Dropout(0.6))
-             model.add(tf.keras.layers.LSTM(1, return_sequences=False, activation='sigmoid'))
-             # model.add(tf.keras.layers.Flatten())
-             # model.add(tf.keras.layers.Dense(1, activation='sigmoid', use_bias=False, trainable=True))
-
-             model.summary()
-
-             model.compile(optimizer='adam', loss='binary_crossentropy',
-                           metrics=['accuracy'])
-
-             model.fit(text, labels, epochs=2000, verbose=False)
-
-             # Plot the word vectors
-             cb = [15/255, 25/255, 35/255]
-             cf = [25/255*2, 35/255*2, 45/255*2]
-             w = [242/255, 242/255, 242/255]
-             blue = [68/255, 114/255, 196/255]
-             orange = [197/255, 90/255, 17/255]
-
-             fig = plt.figure(facecolor=cb, figsize=(7, 7))
-             ax = fig.add_subplot()
-             ax.tick_params(color=w, labelcolor=w, labelsize=12)
-             ax.set_facecolor(cb)
-
-             y_pred = model.predict(np.array(vectorizer.get_vocabulary(
-                 include_special_tokens=False)).reshape(-1, 1))
-
-             embed_model = tf.keras.models.Model(model.input, model.layers[1].output)
-             X_embed = embed_model(np.array(vectorizer.get_vocabulary(
-                 include_special_tokens=False)).reshape(-1, 1))[:, 0, :]
-
-             # Map the 1st dimension of the word vectors to the x-axis,
-             # the 2nd to the y-axis and the 3rd to the z-axis
-             ax.scatter(X_embed[:, 0], X_embed[:, 1],
-                        c=y_pred, cmap='coolwarm')
-             for i in range(vectorizer.vocabulary_size()-2):
-                 ax.text(X_embed[i, 0], X_embed[i, 1],
-                         vectorizer.get_vocabulary(include_special_tokens=False)[i],
-                         color=w)
-
-             ax.set_ylim(-2, 2)
-             ax.set_xlim(-2, 2)
-
-             ax.set_xticks([-2, -1, 0, 1, 2])
-             ax.set_yticks([-2, -1, 0, 1, 2])
-
-             ax.spines['bottom'].set_color(w)
-             ax.spines['top'].set_color(w)
-             ax.spines['right'].set_color(w)
-             ax.spines['left'].set_color(w)
-
-             ax.set_xlabel('Dimension 1', color=w, fontsize=15, labelpad=10)
-             ax.set_ylabel('Dimension 2', color=w, fontsize=15, labelpad=10)
-
-             # Get the mappable for the colorbar; the 1st and 2nd children are the x and y axes
-             PCM = ax.get_children()[0]
-             cbar = plt.colorbar(PCM, ax=ax, fraction=0.036, pad=0.090)
-             cbar.set_ticks([])
-
-             cbar.set_label(
-                 '<- positiv Stimmung negativ ->', fontsize=12, color=w, rotation=270, labelpad=12)
-
-             ax.set_title('Epoche 2000', color=w, fontsize=15)
-             st.pyplot(fig)
-
-
- # %%
- with tab3:
-
-     st.markdown(
-         'Definieren Sie Sätze bei denen beliebige Wörter maskiert werden, um allgemeine Bezüge in Wort-Vektoren abzubilden.')
-
-     text_input = 'Das schöne Allgäu\n' + \
-         'Das wunderbare Allgäu\n' + \
-         'Das grüne Allgäu\n' + \
-         'Radfahren im Allgäu\n' + \
-         'Wandern im Allgäu\n' + \
-         'Radfahren in Oberschwaben\n' + \
-         'Urlaub in Oberschwaben\n' + \
-         'Künstliche Intelligenz für das Allgäu\n' + \
-         'Künstliche Intelligenz für Oberschwaben\n' + \
-         'Data Science für Oberschwaben\n' + \
-         'Data Science und Machine Learning\n' + \
-         'Machine Learning für das Allgäu'
-
-     string = st.text_area('', value=text_input, height=275)
-     text = string.split('\n')
-
-     if st.button('Modell trainieren und Wort-Vektoren darstellen', key=2):
-         with st.spinner('Der Fit-Prozess kann einige Sekunden dauern ...'):
-
-             vectorizer = tf.keras.layers.TextVectorization(
-                 max_tokens=1000, output_sequence_length=7)
-
-             vectorizer.adapt(text)
-
-             def generator():
-                 while True:
-                     x = vectorizer(text)
-                     mask = tf.reduce_max(x)+1
-
-                     lengths = tf.argmin(x, axis=1)
-                     lengths = tf.cast(lengths, tf.float32)
-
-                     masks = tf.random.uniform(shape=(x.shape[0],), minval=0, maxval=lengths)
-                     masks = tf.cast(masks, tf.int32)
-
-                     masks = tf.one_hot(masks, x.shape[1], dtype=tf.int32)
-                     masks = tf.cast(masks, tf.bool)
-
-                     y = x[masks]
-                     masks = tf.cast(masks, tf.int64)
-                     x = x * (1-masks) + mask * masks
-                     yield x, y
-
-             # data = tf.data.Dataset.from_tensor_slices(vectorizer(text), vectorizer(text))
-             # data = data.map(masking_generator)
-             model = tf.keras.models.Sequential()
-
-             model.add(tf.keras.layers.Embedding(vectorizer.vocabulary_size()+1, 3))
-
-             model.add(tf.keras.layers.LSTM(100, return_sequences=False, activation='sigmoid'))
-             model.add(tf.keras.layers.Dense(vectorizer.vocabulary_size(), activation='softmax'))
-
-             model.summary()
-
-             model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
-                           metrics=['accuracy'])
-
-             lr_reduce = tf.keras.callbacks.ReduceLROnPlateau(
-                 monitor='loss', patience=500, min_lr=1e-6)
-             model.fit(generator(), steps_per_epoch=1,
-                       epochs=3000, callbacks=lr_reduce, verbose=False)
-
-             fig = plt.figure(facecolor=cb, figsize=(7, 7))
-             ax = fig.add_subplot()
-             ax.tick_params(color=w, labelcolor=w, labelsize=12)
-             ax.set_facecolor(cb)
-
-             embed_model = tf.keras.models.Model(model.input, model.layers[0].output)
-             X_embed = embed_model(vectorizer(vectorizer.get_vocabulary(
-                 include_special_tokens=False)))[:, 0, :]
-
-             # Map the 1st dimension of the word vectors to the x-axis,
-             # the 2nd to the y-axis and the 3rd to the z-axis
-             ax.scatter(X_embed[:, 0], X_embed[:, 1],
-                        color=blue)
-             for i in range(vectorizer.vocabulary_size()-2):
-                 ax.text(X_embed[i, 0], X_embed[i, 1],
-                         vectorizer.get_vocabulary(include_special_tokens=False)[i],
-                         color=w)
-
-             ax.set_ylim(-2, 2)
-             ax.set_xlim(-2, 2)
-
-             ax.set_xticks([-2, -1, 0, 1, 2])
-             ax.set_yticks([-2, -1, 0, 1, 2])
-
-             ax.spines['bottom'].set_color(w)
-             ax.spines['top'].set_color(w)
-             ax.spines['right'].set_color(w)
-             ax.spines['left'].set_color(w)
-
-             ax.set_xlabel('Dimension 1', color=w, fontsize=15, labelpad=10)
-             ax.set_ylabel('Dimension 2', color=w, fontsize=15, labelpad=10)
-
-             st.pyplot(fig)
-
-
- # %%
- with tab4:
-     # st.header("Übersetzung: Deutsch --> Englisch")
-     # st.text("Übersetzung: Deutsch --> Englisch")
-     st.markdown('Probieren Sie verschiedene Pipelines der Transformer-Bibliothek von HuggingFace.')
-
-     text_input_4 = 'Was ist der Schwerpunkt?'
-
-     string_4 = st.text_area('Frage zum Kontext beantworten', value=text_input_4, height=25)
-
-     text_input_5 = 'Wir unterstützen Unternehmen bei der Datenanalyse durch individuelle Beratung und Projekte mit besonderem Fokus auf maschinelles Lernen und Deep Learning.'
-
-     string_5 = st.text_area('Kontext', value=text_input_5, height=75)
-
-     if st.button('Ein fertig trainiertes Transformer-Modell von HuggingFace anwenden', key=4):
-         with st.spinner('Die Beantwortung der Frage kann einige Sekunden dauern ...'):
-
-             a5 = qa_pipeline(question=string_4, context=string_5)
-             st.text(a5)
-
-     ############################################################
-
-     st.text('')
-     st.markdown("""<hr style="height:10px;border:none;color:#333;background-color:#333;" /> """,
-                 unsafe_allow_html=True)
-     st.text('')
-
-     text_input_7 = 'Wir lieben Data Science!'
-
-     string_7 = st.text_area('Stimmungsanalyse', value=text_input_7, height=25)
-
-     if st.button('Ein fertig trainiertes Transformer-Modell von HuggingFace anwenden', key=5):
-         with st.spinner('Die Beurteilung der Stimmung kann einige Sekunden dauern ...'):
-
-             a5 = sentiment(string_7)
-             st.text(a5[0]['label'])
-
-     ############################################################
-     st.text('')
-     st.markdown("""<hr style="height:10px;border:none;color:#333;background-color:#333;" /> """,
-                 unsafe_allow_html=True)
-     st.text('')
-
-     references = 'Verwendete Modelle:\n' + \
-         '\n\nFrage beantworten:\n' + \
-         'deepset/gelectra-base-germanquad, Autoren: Timo Möller, Julian Risch, Malte Pietsch' + \
-         '\n\nStimmung:\n' + \
-         'oliverguhr/german-sentiment-bert, Autoren: Oliver Guhr, Anne-Kathrin Schumann, Frank Bahrmann, Hans Joachim Böhme'
-
-     st.markdown(references)
  """


  import numpy as np
+ import tensorflow as tf
+ from tensorflow import keras
  import matplotlib.pyplot as plt
  import streamlit as st
+ plt.style.use('mystyle.mplstyle')

+ # Define the neural network as the agent that chooses ad scheme A (0) or B (1)
+ model = tf.keras.models.Sequential()
+ model.add(tf.keras.layers.Dense(1, activation="sigmoid", input_shape=(1,)))
+ model.summary()
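+ # The single sigmoid neuron has just two trainable parameters, one weight and
+ # one bias; its output in [0, 1] is used below as the probability of choosing ad B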

+ @tf.function()
+ def action_selection(model):
+     # Use GradientTape to let TensorFlow build the gradients automatically
+     with tf.GradientTape() as tape:
+
+         # As we have no information about the user viewing the ad,
+         # the input to the neural network is always the same: 0
+         output = model(np.array([[0.0]]))  # [0 ... 1]
+
+         # The output of the neural network is interpreted as the probability of
+         # taking action A (0) or B (1).
+         # We compare the output with a uniform random variable.
+         # For example, if the output is 0.8, there is an 80% chance that the
+         # random variable is smaller, taking action B (1),
+         # and a 20% chance that it is larger, taking action A (0).
+         action = (tf.random.uniform((1, 1)) < output)  # [0 or 1]
+
+         # The loss value measures the difference between the output and the action
+         loss = tf.reduce_mean(tf.keras.losses.binary_crossentropy(action, output))
+
+     # Create the gradients [dloss/dw, dloss/db]
+     grads = tape.gradient(loss, model.trainable_variables)
+     return output, action, loss, grads
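+ # For instance, an output of 0.5 makes both ads equally likely, while an output
+ # near 1.0 almost always selects ad B; sampling the action rather than always
+ # taking the more probable ad keeps the agent exploring both options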
 
47
 
 
 
 
48
 
49
+ st.markdown(
50
+ 'Simulate A/B optimization with policy gradient reinforcement learning')
 
 
 
 
 
 
51
 
 
 
52
 
53
+ learning_rate = st.text_area('Learning rate', value=0.1, height=25)
 
54
 
55
+ prob_A = st.text_area('Click probability of ad A', 0.4, height=75)
 
 
 
56
 
57
+ prob_A = st.text_area('Click probability of ad B', 0.5, height=75)
 
58
 
59
+ epochs = st.text_area('Number of ad impressions (epochs)', 2000, height=75)
60
 
61
+ if st.button('Modell trainieren und Fit-Kurve darstellen'):
62
 
63
+ with st.spinner('Simulating the ad campaign may take a few seconds ...'):
 
 
 
 
+
+         for epoch in range(epochs):
+
+             output, action, loss, grads = action_selection(model)
+
+             # Next we apply the action by displaying ad A or B.
+             # As we do not want to wait to see whether a real user clicks the ad,
+             # we simulate the click instead: ad A is clicked with probability
+             # prob_A (default 0.4), giving it a lower chance of being clicked
+             # than ad B with probability prob_B (default 0.5).
+             # The simulated click serves as the reward for training.
+             if action == False:  # Action A
+                 reward = float(np.random.random() < prob_A)
+
+             if action == True:  # Action B
+                 reward = float(np.random.random() < prob_B)
+
+             # The gradients obtained above are scaled by the acquired reward:
+             # gradients of actions that led to a click are kept unchanged,
+             # whereas gradients of actions that did not lead to a click are reversed
+             grads_adjusted = []
+             for var_index in range(len(model.trainable_variables)):
+                 grads_adjusted.append((reward-0.5)*2 * grads[var_index])
+
+             # Apply the adjusted gradients in a manual gradient-descent step
+             # to update the model parameters
+             model.trainable_variables[0].assign(
+                 model.trainable_variables[0]-learning_rate*grads_adjusted[0])
+             model.trainable_variables[1].assign(
+                 model.trainable_variables[1]-learning_rate*grads_adjusted[1])
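+             # For example, with reward = 1 the multiplier is (1-0.5)*2 = +1 and the
+             # step is ordinary gradient descent; with reward = 0 it is (0-0.5)*2 = -1,
+             # pushing the output away from the action that was just taken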
+
+             information_for_plotting[epoch, 0] = output.numpy()[0]
+             information_for_plotting[epoch, 1] = action.numpy()[0].astype(int)
+             information_for_plotting[epoch, 2] = loss
+             information_for_plotting[epoch, 3] = grads[0]
+             information_for_plotting[epoch, 4] = grads[1]
+             information_for_plotting[epoch, 5] = reward
+             information_for_plotting[epoch, 6] = grads_adjusted[0]
+             information_for_plotting[epoch, 7] = grads_adjusted[1]
+             information_for_plotting[epoch, 8] = model.trainable_variables[0].numpy()
+             information_for_plotting[epoch, 9] = model.trainable_variables[1].numpy()
+
+         titles = ['Model Output', 'Action', 'Loss', 'Gradients', 'Rewards',
+                   'Adjusted Gradients', 'Model Parameters']
+         plus = [0, 0, 0, 0, 1, 1, 2]
 
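+         # `plus` shifts the column index so that each subplot reads the right
+         # column(s) of information_for_plotting: e.g. subplot 3 ('Gradients')
+         # plots columns 3 (weight) and 4 (bias), subplot 4 ('Rewards') plots column 5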
+         fig = plt.figure(figsize=(12, 26))
+         fig.subplots(7, 1, sharex=True)
+         for i in range(7):
+             plt.subplot(7, 1, i+1)
+             plt.subplots_adjust(hspace=.0)
+
+             if i in [0, 1, 2, 4]:
+                 plt.plot(information_for_plotting[:, i+plus[i]])
+                 plt.gca().yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
+
+             else:
+                 plt.plot(information_for_plotting[:, i+1+plus[i]], label='Bias')
+                 plt.plot(information_for_plotting[:, i+plus[i]], label='Weight')
+
+                 plt.legend(loc="upper left")
+                 plt.gca().yaxis.set_major_formatter(plt.FormatStrFormatter('%.2f'))
+
+             plt.ylabel(titles[i])
+
+         plt.xlabel('Epoch')
+
+         st.markdown('Your ad campaign received ' +
+                     str(int(information_for_plotting[:, 5].sum())) + ' clicks in total.')
+
+         st.pyplot(fig)