TejAndrewsACC committed
Commit ed1fed9 · verified · 1 Parent(s): 9c9dfc6

Create model.py

Files changed (1)
  1. model.py +742 -0
model.py ADDED
@@ -0,0 +1,742 @@
# coding=utf-8
# Copyright 2025 The ACC Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ACC-FiPhi-NeuralMark-V3"""

import math
import random

# Golden ratio; used throughout to size layers, epochs, and generation lengths.
PHI = (1 + math.sqrt(5)) / 2

# Load the training corpus from TRAINING_DATA.txt.
with open("TRAINING_DATA.txt", "r", encoding="utf-8") as f:
    text = f.read()

words = text.split()

# Trigram chain: (word_i, word_i+1) -> list of observed following words.
trigram_chain = {}
for i in range(len(words) - 2):
    key = (words[i], words[i + 1])
    next_word = words[i + 2]
    if key not in trigram_chain:
        trigram_chain[key] = []
    trigram_chain[key].append(next_word)


def generate_text(length):
    """Generate roughly `length` words by walking the trigram chain."""
    if not trigram_chain:
        return ""
    key = random.choice(list(trigram_chain.keys()))
    result = [key[0], key[1]]
    for _ in range(length - 2):
        if key in trigram_chain:
            next_word = random.choice(trigram_chain[key])
            result.append(next_word)
            key = (key[1], next_word)
        else:
            break
    return " ".join(result)
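

# Illustrative sketch (not part of the original commit): the same trigram idea on a tiny
# hand-written corpus, so the chain structure is easy to inspect. The helper name
# _demo_trigram_chain is an assumption; nothing in the file calls it.
def _demo_trigram_chain():
    demo_words = "the quick brown fox jumps over the lazy dog the quick red fox".split()
    demo_chain = {}
    for i in range(len(demo_words) - 2):
        demo_chain.setdefault((demo_words[i], demo_words[i + 1]), []).append(demo_words[i + 2])
    # ("the", "quick") was followed by "brown" once and "red" once.
    return demo_chain[("the", "quick")]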


class NeuralNetwork:
    """Two-hidden-layer feedforward network with sigmoid activations, trained by plain backprop."""

    def __init__(self, input_size, hidden_size1, hidden_size2, output_size):
        self.input_size = input_size
        self.hidden_size1 = hidden_size1
        self.hidden_size2 = hidden_size2
        self.output_size = output_size
        self.weights_input_hidden1 = [
            [random.random() for _ in range(input_size)] for _ in range(hidden_size1)
        ]
        self.weights_hidden1_hidden2 = [
            [random.random() for _ in range(hidden_size1)] for _ in range(hidden_size2)
        ]
        self.weights_hidden2_output = [
            [random.random() for _ in range(hidden_size2)] for _ in range(output_size)
        ]
        self.bias_hidden1 = [random.random() for _ in range(hidden_size1)]
        self.bias_hidden2 = [random.random() for _ in range(hidden_size2)]
        self.bias_output = [random.random() for _ in range(output_size)]

    def sigmoid(self, x):
        return 1 / (1 + math.exp(-x))

    def sigmoid_derivative(self, x):
        # x is expected to be a sigmoid output, so this is sigma(z) * (1 - sigma(z)).
        return x * (1 - x)

    def forward(self, inputs):
        self.hidden_input1 = [
            sum(inputs[i] * self.weights_input_hidden1[j][i] for i in range(self.input_size)) + self.bias_hidden1[j]
            for j in range(self.hidden_size1)
        ]
        self.hidden_output1 = [self.sigmoid(x) for x in self.hidden_input1]
        self.hidden_input2 = [
            sum(self.hidden_output1[i] * self.weights_hidden1_hidden2[j][i] for i in range(self.hidden_size1)) + self.bias_hidden2[j]
            for j in range(self.hidden_size2)
        ]
        self.hidden_output2 = [self.sigmoid(x) for x in self.hidden_input2]
        self.output_input = [
            sum(self.hidden_output2[i] * self.weights_hidden2_output[j][i] for i in range(self.hidden_size2)) + self.bias_output[j]
            for j in range(self.output_size)
        ]
        self.output_output = [self.sigmoid(x) for x in self.output_input]
        return self.output_output

    def backward(self, inputs, target, learning_rate=0.1):
        # Assumes forward() has just been called, so the cached activations are current.
        output_errors = [target[i] - self.output_output[i] for i in range(self.output_size)]
        output_deltas = [output_errors[i] * self.sigmoid_derivative(self.output_output[i])
                         for i in range(self.output_size)]
        hidden2_errors = [
            sum(output_deltas[k] * self.weights_hidden2_output[k][j] for k in range(self.output_size))
            for j in range(self.hidden_size2)
        ]
        hidden2_deltas = [hidden2_errors[j] * self.sigmoid_derivative(self.hidden_output2[j])
                          for j in range(self.hidden_size2)]
        hidden1_errors = [
            sum(hidden2_deltas[k] * self.weights_hidden1_hidden2[k][j] for k in range(self.hidden_size2))
            for j in range(self.hidden_size1)
        ]
        hidden1_deltas = [hidden1_errors[j] * self.sigmoid_derivative(self.hidden_output1[j])
                          for j in range(self.hidden_size1)]

        for i in range(self.output_size):
            for j in range(self.hidden_size2):
                self.weights_hidden2_output[i][j] += learning_rate * output_deltas[i] * self.hidden_output2[j]
            self.bias_output[i] += learning_rate * output_deltas[i]

        for i in range(self.hidden_size2):
            for j in range(self.hidden_size1):
                self.weights_hidden1_hidden2[i][j] += learning_rate * hidden2_deltas[i] * self.hidden_output1[j]
            self.bias_hidden2[i] += learning_rate * hidden2_deltas[i]

        for i in range(self.hidden_size1):
            for j in range(self.input_size):
                self.weights_input_hidden1[i][j] += learning_rate * hidden1_deltas[i] * inputs[j]
            self.bias_hidden1[i] += learning_rate * hidden1_deltas[i]
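

# Illustrative sketch (not part of the original commit): exercising the NeuralNetwork
# forward/backward API on XOR. The helper name, sizes, and epoch count are assumptions,
# and convergence is not guaranteed with this simple random initialization.
def _demo_xor():
    xor_net = NeuralNetwork(input_size=2, hidden_size1=4, hidden_size2=4, output_size=1)
    samples = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
    for _ in range(2000):
        for x, y in samples:
            xor_net.forward(x)  # forward() caches activations on the instance
            xor_net.backward(x, y, learning_rate=0.5)
    return [round(xor_net.forward(x)[0], 2) for x, _ in samples]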


class RecurrentNeuralNetwork:
    """Minimal recurrent network: the input vector is scanned twice, one position per step."""

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.weights_input_hidden = [
            [random.random() for _ in range(input_size)] for _ in range(hidden_size)
        ]
        self.weights_hidden_hidden = [
            [random.random() for _ in range(hidden_size)] for _ in range(hidden_size)
        ]
        self.weights_hidden_output = [
            [random.random() for _ in range(hidden_size)] for _ in range(output_size)
        ]
        self.bias_hidden = [random.random() for _ in range(hidden_size)]
        self.bias_output = [random.random() for _ in range(output_size)]

    def sigmoid(self, x):
        return 1 / (1 + math.exp(-x))

    def sigmoid_derivative(self, x):
        return x * (1 - x)

    def forward(self, inputs):
        self.hidden_state = [0] * self.hidden_size
        for _ in range(2):
            for i in range(len(inputs)):
                # Present one input position at a time as a sparse vector.
                current_input = [0] * self.input_size
                current_input[i] = inputs[i]
                combined = [
                    sum(current_input[k] * self.weights_input_hidden[j][k] for k in range(self.input_size)) +
                    sum(self.hidden_state[k] * self.weights_hidden_hidden[j][k] for k in range(self.hidden_size)) +
                    self.bias_hidden[j]
                    for j in range(self.hidden_size)
                ]
                self.hidden_state = [self.sigmoid(val) for val in combined]
        output = [
            sum(self.hidden_state[k] * self.weights_hidden_output[i][k] for k in range(self.hidden_size)) +
            self.bias_output[i]
            for i in range(self.output_size)
        ]
        return [self.sigmoid(o) for o in output]

    def backward(self, inputs, target, learning_rate=0.1):
        output = self.forward(inputs)
        output_errors = [target[i] - output[i] for i in range(self.output_size)]
        output_deltas = [output_errors[i] * self.sigmoid_derivative(output[i])
                         for i in range(self.output_size)]
        hidden_errors = [
            sum(output_deltas[k] * self.weights_hidden_output[k][j] for k in range(self.output_size))
            for j in range(self.hidden_size)
        ]
        hidden_deltas = [hidden_errors[j] * self.sigmoid_derivative(self.hidden_state[j])
                         for j in range(self.hidden_size)]

        for i in range(self.output_size):
            for j in range(self.hidden_size):
                self.weights_hidden_output[i][j] += learning_rate * output_deltas[i] * self.hidden_state[j]
            self.bias_output[i] += learning_rate * output_deltas[i]

        for j in range(self.hidden_size):
            for k in range(self.input_size):
                self.weights_input_hidden[j][k] += learning_rate * hidden_deltas[j] * (inputs[k] if k < len(inputs) else 0)
            self.bias_hidden[j] += learning_rate * hidden_deltas[j]
        return output_errors
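

# Illustrative sketch (not part of the original commit): one training step of the
# RecurrentNeuralNetwork on a toy binary sequence. The helper name and sizes are assumptions.
def _demo_rnn_step():
    toy_rnn = RecurrentNeuralNetwork(input_size=4, hidden_size=3, output_size=2)
    sequence = [1, 0, 1, 1]  # one value per input position
    errors = toy_rnn.backward(sequence, target=[1, 0], learning_rate=0.1)
    return len(errors)  # == output_size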


class ConvolutionalNeuralNetwork:
    """1-D CNN: two ReLU convolution layers followed by a fully connected ReLU output layer."""

    def __init__(self, input_length, kernel_size1, kernel_size2, output_size):
        self.input_length = input_length
        self.kernel_size1 = kernel_size1
        self.kernel_size2 = kernel_size2
        self.output_size = output_size
        self.kernel1 = [random.random() for _ in range(kernel_size1)]
        self.bias1 = random.random()
        self.kernel2 = [random.random() for _ in range(kernel_size2)]
        self.bias2 = random.random()
        # After two valid convolutions the feature map has input_length - kernel_size1 - kernel_size2 + 2 values.
        self.weights_output = [
            [random.random() for _ in range(input_length - kernel_size1 - kernel_size2 + 2)]
            for _ in range(output_size)
        ]
        self.bias_output = [random.random() for _ in range(output_size)]

    def relu(self, x):
        return x if x > 0 else 0

    def relu_derivative(self, x):
        return 1 if x > 0 else 0

    def convolve(self, inputs, kernel, bias):
        conv_output = []
        kernel_size = len(kernel)
        for i in range(len(inputs) - kernel_size + 1):
            s = sum(inputs[i + j] * kernel[j] for j in range(kernel_size)) + bias
            conv_output.append(self.relu(s))
        return conv_output

    def forward(self, inputs):
        conv1 = self.convolve(inputs, self.kernel1, self.bias1)
        conv2 = self.convolve(conv1, self.kernel2, self.bias2)
        fc_input = conv2
        output = [
            sum(fc_input[j] * self.weights_output[i][j] for j in range(len(fc_input))) + self.bias_output[i]
            for i in range(self.output_size)
        ]
        return [self.relu(o) for o in output]

    def backward(self, inputs, target, learning_rate=0.1):
        # Only the fully connected layer is updated; the kernels keep their random values.
        output = self.forward(inputs)
        output_errors = [target[i] - output[i] for i in range(self.output_size)]
        for i in range(self.output_size):
            for j in range(len(inputs) - self.kernel_size1 - self.kernel_size2 + 2):
                self.weights_output[i][j] += learning_rate * output_errors[i]
            self.bias_output[i] += learning_rate * output_errors[i]
        return output_errors
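

# Illustrative sketch (not part of the original commit): running the ConvolutionalNeuralNetwork
# on a random 1-D signal. The helper name and sizes are assumptions; input_length must satisfy
# input_length - kernel_size1 - kernel_size2 + 2 > 0 so the fully connected layer has inputs.
def _demo_cnn():
    toy_cnn = ConvolutionalNeuralNetwork(input_length=8, kernel_size1=3, kernel_size2=2, output_size=2)
    signal = [random.random() for _ in range(8)]
    return toy_cnn.forward(signal)  # two non-negative ReLU outputs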


class GeneticAlgorithm:
    """Simple genetic algorithm whose fitness rewards genes that are close to PHI."""

    def __init__(self, population_size, gene_length):
        self.population_size = population_size
        self.gene_length = gene_length
        self.population = [
            [random.random() for _ in range(gene_length)] for _ in range(population_size)
        ]

    def fitness(self, individual):
        # Negative squared distance from PHI, so higher (closer to zero) is better.
        return -sum((gene - PHI) ** 2 for gene in individual)

    def selection(self):
        selected = sorted(self.population, key=self.fitness, reverse=True)
        return selected[: self.population_size // 2]

    def crossover(self, parent1, parent2):
        point = random.randint(1, self.gene_length - 1)
        child = parent1[:point] + parent2[point:]
        return child

    def mutate(self, individual, mutation_rate=0.01):
        for i in range(self.gene_length):
            if random.random() < mutation_rate:
                individual[i] = random.random()
        return individual

    def evolve(self, generations):
        for _ in range(generations):
            selected = self.selection()
            new_population = selected[:]
            while len(new_population) < self.population_size:
                parent1 = random.choice(selected)
                parent2 = random.choice(selected)
                child = self.crossover(parent1, parent2)
                child = self.mutate(child)
                new_population.append(child)
            self.population = new_population
        best = max(self.population, key=self.fitness)
        return best, self.fitness(best)
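

# Illustrative sketch (not part of the original commit): a short GeneticAlgorithm run.
# The helper name, population size, gene length, and generation count are assumptions.
# Fitness is non-positive; values closer to zero mean genes closer to PHI (genes stay in
# [0, 1) with this initialization, so they can only approach 1).
def _demo_ga():
    toy_ga = GeneticAlgorithm(population_size=10, gene_length=4)
    best, fit = toy_ga.evolve(generations=20)
    return best, fit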


class LSTM:
    """Single-step LSTM cell: input, forget, output, and candidate gates, then a sigmoid readout."""

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.W_i = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)]
        self.U_i = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)]
        self.b_i = [random.random() for _ in range(hidden_size)]
        self.W_f = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)]
        self.U_f = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)]
        self.b_f = [random.random() for _ in range(hidden_size)]
        self.W_o = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)]
        self.U_o = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)]
        self.b_o = [random.random() for _ in range(hidden_size)]
        self.W_c = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)]
        self.U_c = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)]
        self.b_c = [random.random() for _ in range(hidden_size)]
        self.W_y = [[random.random() for _ in range(hidden_size)] for _ in range(output_size)]
        self.b_y = [random.random() for _ in range(output_size)]

    def sigmoid(self, x):
        return 1 / (1 + math.exp(-x))

    def forward(self, inputs):
        h = [0] * self.hidden_size
        c = [0] * self.hidden_size

        # Input gate.
        i_gate = []
        for j in range(self.hidden_size):
            s = sum(inputs[k] * self.W_i[j][k] for k in range(self.input_size)) + \
                sum(h[k] * self.U_i[j][k] for k in range(self.hidden_size)) + self.b_i[j]
            i_gate.append(self.sigmoid(s))

        # Forget gate.
        f_gate = []
        for j in range(self.hidden_size):
            s = sum(inputs[k] * self.W_f[j][k] for k in range(self.input_size)) + \
                sum(h[k] * self.U_f[j][k] for k in range(self.hidden_size)) + self.b_f[j]
            f_gate.append(self.sigmoid(s))

        # Output gate.
        o_gate = []
        for j in range(self.hidden_size):
            s = sum(inputs[k] * self.W_o[j][k] for k in range(self.input_size)) + \
                sum(h[k] * self.U_o[j][k] for k in range(self.hidden_size)) + self.b_o[j]
            o_gate.append(self.sigmoid(s))

        # Candidate cell state.
        g_gate = []
        for j in range(self.hidden_size):
            s = sum(inputs[k] * self.W_c[j][k] for k in range(self.input_size)) + \
                sum(h[k] * self.U_c[j][k] for k in range(self.hidden_size)) + self.b_c[j]
            g_gate.append(math.tanh(s))

        # Cell and hidden state updates, then the output projection.
        c = [f_gate[j] * c[j] + i_gate[j] * g_gate[j] for j in range(self.hidden_size)]
        h = [o_gate[j] * math.tanh(c[j]) for j in range(self.hidden_size)]

        y = []
        for i in range(self.output_size):
            s = sum(h[j] * self.W_y[i][j] for j in range(self.hidden_size)) + self.b_y[i]
            y.append(self.sigmoid(s))
        return y
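

# Illustrative sketch (not part of the original commit): a single LSTM forward pass.
# The helper name and sizes are assumptions. Note that the class above evaluates its gates
# once on the whole input vector (a single time step) rather than unrolling over a sequence.
def _demo_lstm():
    toy_lstm = LSTM(input_size=3, hidden_size=2, output_size=2)
    return toy_lstm.forward([0.5, 0.1, 0.9])  # two sigmoid outputs in (0, 1)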


class Transformer:
    """Single-head scaled dot-product self-attention followed by a sigmoid token projection."""

    def __init__(self, d_model, num_tokens):
        self.d_model = d_model
        self.num_tokens = num_tokens
        self.W_q = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        self.W_k = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        self.W_v = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        self.W_o = [[random.random() for _ in range(d_model)] for _ in range(d_model)]

    def dot_product(self, a, b):
        return sum(x * y for x, y in zip(a, b))

    def matmul_vector(self, matrix, vector):
        return [sum(matrix[i][j] * vector[j] for j in range(len(vector))) for i in range(len(matrix))]

    def softmax(self, x):
        m = max(x)
        exps = [math.exp(i - m) for i in x]
        s = sum(exps)
        return [j / s for j in exps]

    def forward(self, inputs):
        queries = [self.matmul_vector(self.W_q, token) for token in inputs]
        keys = [self.matmul_vector(self.W_k, token) for token in inputs]
        values = [self.matmul_vector(self.W_v, token) for token in inputs]
        outputs = []
        for i in range(len(inputs)):
            scores = []
            for j in range(len(inputs)):
                score = self.dot_product(queries[i], keys[j]) / math.sqrt(self.d_model)
                scores.append(score)
            attn = self.softmax(scores)
            attn_output = [0] * self.d_model
            for j in range(len(inputs)):
                for k in range(self.d_model):
                    attn_output[k] += attn[j] * values[j][k]
            out = self.matmul_vector(self.W_o, attn_output)
            outputs.append(out)
        # Average the attended token representations, then project to vocabulary scores.
        # Note: the projection weights are re-sampled on every call.
        avg_output = [sum(x[k] for x in outputs) / len(outputs) for k in range(self.d_model)]
        proj_weights = [[random.random() for _ in range(self.d_model)] for _ in range(self.num_tokens)]
        proj_bias = [random.random() for _ in range(self.num_tokens)]
        token_scores = [
            sum(avg_output[k] * proj_weights[i][k] for k in range(self.d_model)) + proj_bias[i]
            for i in range(self.num_tokens)
        ]
        token_output = [1 / (1 + math.exp(-score)) for score in token_scores]
        return token_output
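

# Illustrative sketch (not part of the original commit): self-attention over three toy token
# vectors of dimension d_model. The helper name and sizes are assumptions. Because forward()
# re-samples its output projection on every call, the scores change from call to call.
def _demo_transformer():
    toy_tf = Transformer(d_model=4, num_tokens=5)
    tokens = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]
    return toy_tf.forward(tokens)  # five sigmoid scores, one per vocabulary token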


# Vocabulary and one-hot encodings: input_data[i] encodes words[i], output_data[i] encodes words[i + 1].
unique_words = list(set(words))
word_to_index = {word: i for i, word in enumerate(unique_words)}
index_to_word = {i: word for word, i in word_to_index.items()}

input_data = [[0] * len(unique_words) for _ in range(len(words) - 2)]
for i in range(len(words) - 2):
    input_data[i][word_to_index[words[i]]] = 1

output_data = [[0] * len(unique_words) for _ in range(len(words) - 2)]
for i in range(len(words) - 2):
    output_data[i][word_to_index[words[i + 1]]] = 1
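

# Illustrative sketch (not part of the original commit): the same one-hot scheme on a tiny
# vocabulary, to make the encoding above concrete. The helper name and vocabulary are assumptions.
def _demo_one_hot():
    vocab = ["to", "be", "or", "not"]
    index = {w: i for i, w in enumerate(vocab)}
    vec = [0] * len(vocab)
    vec[index["or"]] = 1  # "or" -> [0, 0, 1, 0]
    return vec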


# PHI-scaled layer sizes.
input_size = len(unique_words)
hidden_size1 = round(PHI * input_size)
hidden_size2 = round(PHI * hidden_size1)
output_size = len(unique_words)

# Train the feedforward network to predict the next word from a one-hot current word.
nn = NeuralNetwork(input_size, hidden_size1, hidden_size2, output_size)
epochs = round(100 * PHI)
for epoch in range(epochs):
    for i in range(len(input_data)):
        nn.forward(input_data[i])
        nn.backward(input_data[i], output_data[i], learning_rate=0.1)
    if (epoch + 1) % round(PHI) == 0:
        print("Feedforward NN Epoch {}/{}".format(epoch + 1, epochs))

rnn = RecurrentNeuralNetwork(input_size, hidden_size1, output_size)
rnn_output = rnn.forward(input_data[0])
print("Recurrent NN Output:", rnn_output)

kernel_size1 = round(3 * PHI)
kernel_size2 = round(2 * PHI)
cnn = ConvolutionalNeuralNetwork(input_length=round(10 * PHI), kernel_size1=kernel_size1,
                                 kernel_size2=kernel_size2, output_size=output_size)
sample_input = [random.random() for _ in range(round(10 * PHI))]
cnn_output = cnn.forward(sample_input)
print("Convolutional NN Output:", cnn_output)

population_size = round(10 * PHI)
ga = GeneticAlgorithm(population_size, round(PHI * 5))
best_individual, best_fitness = ga.evolve(round(50 * PHI))
print("Genetic Algorithm Best Individual:", best_individual, "Fitness:", best_fitness)

lstm_hidden_size = round(PHI * input_size)
lstm = LSTM(input_size, lstm_hidden_size, output_size)
lstm_output = lstm.forward(input_data[0])
print("LSTM Output:", lstm_output)

# One-hot "token" vectors of dimension d_model for the self-attention demo.
transformer_d_model = round(PHI * input_size)
transformer = Transformer(transformer_d_model, output_size)
transformer_input = []
for i in range(len(unique_words)):
    vec = [0] * transformer_d_model
    if i < transformer_d_model:
        vec[i] = 1
    transformer_input.append(vec)
transformer_output = transformer.forward(transformer_input)
print("Transformer Output:", transformer_output)


def advanced_text_generation(input_vector):
    """Blend all model outputs to pick a predicted word, then append it to PHI-scaled trigram text."""
    ff_output = nn.forward(input_vector)
    rnn_out = rnn.forward(input_vector)
    lstm_out = lstm.forward(input_vector)
    transformer_out = transformer.forward([input_vector])
    combined = [
        (ff_output[i] + rnn_out[i] + lstm_out[i] + transformer_out[i]) / 4
        for i in range(len(ff_output))
    ]
    predicted_index = combined.index(max(combined))
    predicted_word = index_to_word[predicted_index]
    long_text = ""
    current_length = round(10 * PHI)
    for _ in range(5):
        segment = generate_text(current_length)
        long_text += segment + " "
        current_length = round(current_length * PHI)
    return long_text + predicted_word


def chat():
    """Interactive loop: one-hot encode the user's words and reply with generated text."""
    print("FiPhi-NeuralMark ACC Initialized")
    while True:
        user_input = input("\nYou: ")
        if user_input.lower() == "exit":
            print("Goodbye!")
            break
        user_input_tokens = user_input.split()
        input_vector = [0] * len(unique_words)
        for word in user_input_tokens:
            if word in word_to_index:
                input_vector[word_to_index[word]] = 1
        response = advanced_text_generation(input_vector)
        print("FiPhi-NeuralMark:", response)


if __name__ == "__main__":
    chat()