"""ACC-FiPhi-NeuralMark-V3

A dependency-free, pure-Python demo: a trigram Markov chain plus toy
implementations of a feedforward network, a recurrent network, a 1-D CNN,
a genetic algorithm, an LSTM cell, and a single-head transformer block,
all wired into a simple chat loop. Layer sizes and loop counts are scaled
by the golden ratio PHI throughout.
"""

import math
import random

# Golden ratio; used throughout to scale layer sizes, epochs, and text lengths.
PHI = (1 + math.sqrt(5)) / 2

# Load the training corpus. The original called os.getenv on this path, which
# reads an environment variable (and returns None here); the intent is to read
# the text file itself.
with open("FiPhi-NeuralMark-V3/TRAINING_DATA.txt", "r", encoding="utf-8") as f:
    text = f.read()

words = text.split()
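
# Build a second-order Markov ("trigram") chain: each adjacent word pair maps
# to every word that followed that pair somewhere in the corpus.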
trigram_chain = {}
for i in range(len(words) - 2):
    key = (words[i], words[i + 1])
    next_word = words[i + 2]
    trigram_chain.setdefault(key, []).append(next_word)
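# Hypothetical example: if words were ["the", "cat", "sat", "then", "the",
# "cat", "ran"], then trigram_chain[("the", "cat")] == ["sat", "ran"], and a
# walk starting from the key ("the", "cat") continues with "sat" or "ran"
# chosen at random.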


def generate_text(length):
    """Random-walk the trigram chain to produce up to `length` words."""
    # The original checked len(words) < 2, which still crashes on a two-word
    # corpus; an empty chain is the actual precondition that matters.
    if not trigram_chain:
        return ""
    key = random.choice(list(trigram_chain.keys()))
    result = [key[0], key[1]]
    for _ in range(length - 2):
        if key not in trigram_chain:
            break
        next_word = random.choice(trigram_chain[key])
        result.append(next_word)
        key = (key[1], next_word)
    return " ".join(result)
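

# A two-hidden-layer feedforward network with sigmoid activations, trained by
# plain backpropagation to map a one-hot word to the word that follows it.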
class NeuralNetwork:
    def __init__(self, input_size, hidden_size1, hidden_size2, output_size):
        self.input_size = input_size
        self.hidden_size1 = hidden_size1
        self.hidden_size2 = hidden_size2
        self.output_size = output_size
        # Weights and biases are drawn uniformly from [0, 1); adequate for a
        # demo, though zero-centered initialization would train better.
        self.weights_input_hidden1 = [
            [random.random() for _ in range(input_size)] for _ in range(hidden_size1)
        ]
        self.weights_hidden1_hidden2 = [
            [random.random() for _ in range(hidden_size1)] for _ in range(hidden_size2)
        ]
        self.weights_hidden2_output = [
            [random.random() for _ in range(hidden_size2)] for _ in range(output_size)
        ]
        self.bias_hidden1 = [random.random() for _ in range(hidden_size1)]
        self.bias_hidden2 = [random.random() for _ in range(hidden_size2)]
        self.bias_output = [random.random() for _ in range(output_size)]

    def sigmoid(self, x):
        # Clamp the argument so math.exp cannot overflow on extreme inputs.
        x = max(-60.0, min(60.0, x))
        return 1 / (1 + math.exp(-x))

    def sigmoid_derivative(self, x):
        # Expects x to already be a sigmoid output: d/dz sigmoid(z) = s(1 - s).
        return x * (1 - x)

    def forward(self, inputs):
        self.hidden_input1 = [
            sum(inputs[i] * self.weights_input_hidden1[j][i] for i in range(self.input_size)) + self.bias_hidden1[j]
            for j in range(self.hidden_size1)
        ]
        self.hidden_output1 = [self.sigmoid(x) for x in self.hidden_input1]
        self.hidden_input2 = [
            sum(self.hidden_output1[i] * self.weights_hidden1_hidden2[j][i] for i in range(self.hidden_size1)) + self.bias_hidden2[j]
            for j in range(self.hidden_size2)
        ]
        self.hidden_output2 = [self.sigmoid(x) for x in self.hidden_input2]
        self.output_input = [
            sum(self.hidden_output2[i] * self.weights_hidden2_output[j][i] for i in range(self.hidden_size2)) + self.bias_output[j]
            for j in range(self.output_size)
        ]
        self.output_output = [self.sigmoid(x) for x in self.output_input]
        return self.output_output

    def backward(self, inputs, target, learning_rate=0.1):
        # Backpropagate the squared-error gradient layer by layer, then apply
        # the updates (the target - output sign makes += a descent step).
        output_errors = [target[i] - self.output_output[i] for i in range(self.output_size)]
        output_deltas = [output_errors[i] * self.sigmoid_derivative(self.output_output[i])
                         for i in range(self.output_size)]
        hidden2_errors = [
            sum(output_deltas[k] * self.weights_hidden2_output[k][j] for k in range(self.output_size))
            for j in range(self.hidden_size2)
        ]
        hidden2_deltas = [hidden2_errors[j] * self.sigmoid_derivative(self.hidden_output2[j])
                          for j in range(self.hidden_size2)]
        hidden1_errors = [
            sum(hidden2_deltas[k] * self.weights_hidden1_hidden2[k][j] for k in range(self.hidden_size2))
            for j in range(self.hidden_size1)
        ]
        hidden1_deltas = [hidden1_errors[j] * self.sigmoid_derivative(self.hidden_output1[j])
                          for j in range(self.hidden_size1)]

        for i in range(self.output_size):
            for j in range(self.hidden_size2):
                self.weights_hidden2_output[i][j] += learning_rate * output_deltas[i] * self.hidden_output2[j]
            self.bias_output[i] += learning_rate * output_deltas[i]

        for i in range(self.hidden_size2):
            for j in range(self.hidden_size1):
                self.weights_hidden1_hidden2[i][j] += learning_rate * hidden2_deltas[i] * self.hidden_output1[j]
            self.bias_hidden2[i] += learning_rate * hidden2_deltas[i]

        for i in range(self.hidden_size1):
            for j in range(self.input_size):
                self.weights_input_hidden1[i][j] += learning_rate * hidden1_deltas[i] * inputs[j]
            self.bias_hidden1[i] += learning_rate * hidden1_deltas[i]
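

# A minimal recurrent network. Two quirks are kept from the original design:
# forward() presents the input positions one at a time and sweeps the sequence
# twice, and backward() updates only the input-to-hidden and output weights,
# leaving the recurrent weights untrained (no backpropagation through time).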
class RecurrentNeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.weights_input_hidden = [
            [random.random() for _ in range(input_size)] for _ in range(hidden_size)
        ]
        self.weights_hidden_hidden = [
            [random.random() for _ in range(hidden_size)] for _ in range(hidden_size)
        ]
        self.weights_hidden_output = [
            [random.random() for _ in range(hidden_size)] for _ in range(output_size)
        ]
        self.bias_hidden = [random.random() for _ in range(hidden_size)]
        self.bias_output = [random.random() for _ in range(output_size)]

    def sigmoid(self, x):
        x = max(-60.0, min(60.0, x))  # guard against math.exp overflow
        return 1 / (1 + math.exp(-x))

    def sigmoid_derivative(self, x):
        return x * (1 - x)

    def forward(self, inputs):
        self.hidden_state = [0] * self.hidden_size
        for _ in range(2):  # two sweeps over the sequence, per the design
            for i in range(len(inputs)):
                # Present position i as a one-hot-style vector scaled by inputs[i].
                current_input = [0] * self.input_size
                current_input[i] = inputs[i]
                combined = [
                    sum(current_input[k] * self.weights_input_hidden[j][k] for k in range(self.input_size)) +
                    sum(self.hidden_state[k] * self.weights_hidden_hidden[j][k] for k in range(self.hidden_size)) +
                    self.bias_hidden[j]
                    for j in range(self.hidden_size)
                ]
                self.hidden_state = [self.sigmoid(val) for val in combined]
        output = [
            sum(self.hidden_state[k] * self.weights_hidden_output[i][k] for k in range(self.hidden_size)) +
            self.bias_output[i]
            for i in range(self.output_size)
        ]
        return [self.sigmoid(o) for o in output]

    def backward(self, inputs, target, learning_rate=0.1):
        output = self.forward(inputs)
        output_errors = [target[i] - output[i] for i in range(self.output_size)]
        output_deltas = [output_errors[i] * self.sigmoid_derivative(output[i])
                         for i in range(self.output_size)]
        hidden_errors = [
            sum(output_deltas[k] * self.weights_hidden_output[k][j] for k in range(self.output_size))
            for j in range(self.hidden_size)
        ]
        hidden_deltas = [hidden_errors[j] * self.sigmoid_derivative(self.hidden_state[j])
                         for j in range(self.hidden_size)]

        for i in range(self.output_size):
            for j in range(self.hidden_size):
                self.weights_hidden_output[i][j] += learning_rate * output_deltas[i] * self.hidden_state[j]
            self.bias_output[i] += learning_rate * output_deltas[i]

        for j in range(self.hidden_size):
            for k in range(self.input_size):
                self.weights_input_hidden[j][k] += learning_rate * hidden_deltas[j] * (inputs[k] if k < len(inputs) else 0)
            self.bias_hidden[j] += learning_rate * hidden_deltas[j]
        return output_errors
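

# A 1-D convolutional network: two "valid" convolutions with ReLU, then a
# fully connected head. Each valid convolution shortens the signal by
# kernel_size - 1, so an input of length L leaves
# L - kernel_size1 - kernel_size2 + 2 features for the head.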
class ConvolutionalNeuralNetwork:
    def __init__(self, input_length, kernel_size1, kernel_size2, output_size):
        self.input_length = input_length
        self.kernel_size1 = kernel_size1
        self.kernel_size2 = kernel_size2
        self.output_size = output_size
        self.kernel1 = [random.random() for _ in range(kernel_size1)]
        self.bias1 = random.random()
        self.kernel2 = [random.random() for _ in range(kernel_size2)]
        self.bias2 = random.random()
        self.weights_output = [
            [random.random() for _ in range(input_length - kernel_size1 - kernel_size2 + 2)]
            for _ in range(output_size)
        ]
        self.bias_output = [random.random() for _ in range(output_size)]

    def relu(self, x):
        return x if x > 0 else 0

    def relu_derivative(self, x):
        return 1 if x > 0 else 0

    def convolve(self, inputs, kernel, bias):
        # Valid 1-D convolution followed by ReLU.
        conv_output = []
        kernel_size = len(kernel)
        for i in range(len(inputs) - kernel_size + 1):
            s = sum(inputs[i + j] * kernel[j] for j in range(kernel_size)) + bias
            conv_output.append(self.relu(s))
        return conv_output

    def forward(self, inputs):
        conv1 = self.convolve(inputs, self.kernel1, self.bias1)
        conv2 = self.convolve(conv1, self.kernel2, self.bias2)
        self.fc_input = conv2  # stored so backward() can scale its updates
        output = [
            sum(self.fc_input[j] * self.weights_output[i][j] for j in range(len(self.fc_input))) + self.bias_output[i]
            for i in range(self.output_size)
        ]
        return [self.relu(o) for o in output]

    def backward(self, inputs, target, learning_rate=0.1):
        # Only the fully connected head is trained; the kernels stay fixed.
        output = self.forward(inputs)
        output_errors = [target[i] - output[i] for i in range(self.output_size)]
        for i in range(self.output_size):
            for j in range(len(self.fc_input)):
                # Delta-rule update scaled by the unit's input (the original
                # omitted this factor, updating every weight identically).
                self.weights_output[i][j] += learning_rate * output_errors[i] * self.fc_input[j]
            self.bias_output[i] += learning_rate * output_errors[i]
        return output_errors
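

# A genetic algorithm whose fitness is the negative sum of squared distances
# of the genes from PHI. Note that genes are drawn (and mutated) within
# [0, 1) while PHI is about 1.618, so genes can approach 1.0 but never reach
# the optimum at PHI itself.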
class GeneticAlgorithm:
    def __init__(self, population_size, gene_length):
        self.population_size = population_size
        self.gene_length = gene_length
        self.population = [
            [random.random() for _ in range(gene_length)] for _ in range(population_size)
        ]

    def fitness(self, individual):
        return -sum((gene - PHI) ** 2 for gene in individual)

    def selection(self):
        # Truncation selection: keep the fitter half of the population.
        selected = sorted(self.population, key=self.fitness, reverse=True)
        return selected[: self.population_size // 2]

    def crossover(self, parent1, parent2):
        # Single-point crossover.
        point = random.randint(1, self.gene_length - 1)
        return parent1[:point] + parent2[point:]

    def mutate(self, individual, mutation_rate=0.01):
        for i in range(self.gene_length):
            if random.random() < mutation_rate:
                individual[i] = random.random()
        return individual

    def evolve(self, generations):
        for _ in range(generations):
            selected = self.selection()
            new_population = selected[:]
            while len(new_population) < self.population_size:
                parent1 = random.choice(selected)
                parent2 = random.choice(selected)
                child = self.crossover(parent1, parent2)
                child = self.mutate(child)
                new_population.append(child)
            self.population = new_population
        best = max(self.population, key=self.fitness)
        return best, self.fitness(best)
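

# A single LSTM cell evaluated for one step: input, forget, and output gates
# plus a tanh candidate update the cell state c and hidden state h, which is
# then projected through a sigmoid output layer. Forward pass only.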
class LSTM:
    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        # W_* act on the input, U_* on the previous hidden state, b_* are
        # biases; suffixes i/f/o/c are the input, forget, output, and cell
        # (candidate) gates. W_y/b_y form the output projection.
        self.W_i = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)]
        self.U_i = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)]
        self.b_i = [random.random() for _ in range(hidden_size)]
        self.W_f = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)]
        self.U_f = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)]
        self.b_f = [random.random() for _ in range(hidden_size)]
        self.W_o = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)]
        self.U_o = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)]
        self.b_o = [random.random() for _ in range(hidden_size)]
        self.W_c = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)]
        self.U_c = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)]
        self.b_c = [random.random() for _ in range(hidden_size)]
        self.W_y = [[random.random() for _ in range(hidden_size)] for _ in range(output_size)]
        self.b_y = [random.random() for _ in range(output_size)]

    def sigmoid(self, x):
        x = max(-60.0, min(60.0, x))  # guard against math.exp overflow
        return 1 / (1 + math.exp(-x))

    def forward(self, inputs):
        h = [0] * self.hidden_size
        c = [0] * self.hidden_size

        # Input gate: how much of the candidate enters the cell state.
        i_gate = []
        for j in range(self.hidden_size):
            s = sum(inputs[k] * self.W_i[j][k] for k in range(self.input_size)) + \
                sum(h[k] * self.U_i[j][k] for k in range(self.hidden_size)) + self.b_i[j]
            i_gate.append(self.sigmoid(s))

        # Forget gate: how much of the previous cell state is kept.
        f_gate = []
        for j in range(self.hidden_size):
            s = sum(inputs[k] * self.W_f[j][k] for k in range(self.input_size)) + \
                sum(h[k] * self.U_f[j][k] for k in range(self.hidden_size)) + self.b_f[j]
            f_gate.append(self.sigmoid(s))

        # Output gate: how much of the cell state is exposed as h.
        o_gate = []
        for j in range(self.hidden_size):
            s = sum(inputs[k] * self.W_o[j][k] for k in range(self.input_size)) + \
                sum(h[k] * self.U_o[j][k] for k in range(self.hidden_size)) + self.b_o[j]
            o_gate.append(self.sigmoid(s))

        # Candidate cell values.
        g_gate = []
        for j in range(self.hidden_size):
            s = sum(inputs[k] * self.W_c[j][k] for k in range(self.input_size)) + \
                sum(h[k] * self.U_c[j][k] for k in range(self.hidden_size)) + self.b_c[j]
            g_gate.append(math.tanh(s))

        # c starts at zero here, so the forget term vanishes on this one step.
        c = [f_gate[j] * c[j] + i_gate[j] * g_gate[j] for j in range(self.hidden_size)]
        h = [o_gate[j] * math.tanh(c[j]) for j in range(self.hidden_size)]

        y = []
        for i in range(self.output_size):
            s = sum(h[j] * self.W_y[i][j] for j in range(self.hidden_size)) + self.b_y[i]
            y.append(self.sigmoid(s))
        return y
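

# A single-head self-attention block: scaled dot-product attention over the
# input tokens, an output projection, mean pooling over positions, and a
# sigmoid projection down to per-token scores.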
class Transformer:
    def __init__(self, d_model, num_tokens):
        self.d_model = d_model
        self.num_tokens = num_tokens
        self.W_q = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        self.W_k = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        self.W_v = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        self.W_o = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        # The token projection is sampled once here; the original re-randomized
        # it inside every forward() call, making outputs non-repeatable.
        self.proj_weights = [[random.random() for _ in range(d_model)] for _ in range(num_tokens)]
        self.proj_bias = [random.random() for _ in range(num_tokens)]

    def dot_product(self, a, b):
        return sum(x * y for x, y in zip(a, b))

    def matmul_vector(self, matrix, vector):
        # Tolerates vectors shorter than the matrix width: only the first
        # len(vector) columns contribute, as if the rest were zeros.
        return [sum(matrix[i][j] * vector[j] for j in range(len(vector))) for i in range(len(matrix))]

    def softmax(self, x):
        m = max(x)  # subtract the max for numerical stability
        exps = [math.exp(i - m) for i in x]
        s = sum(exps)
        return [j / s for j in exps]

    def forward(self, inputs):
        queries = [self.matmul_vector(self.W_q, token) for token in inputs]
        keys = [self.matmul_vector(self.W_k, token) for token in inputs]
        values = [self.matmul_vector(self.W_v, token) for token in inputs]
        outputs = []
        for i in range(len(inputs)):
            # Attention weights of position i over every position j.
            scores = [self.dot_product(queries[i], keys[j]) / math.sqrt(self.d_model)
                      for j in range(len(inputs))]
            attn = self.softmax(scores)
            attn_output = [0] * self.d_model
            for j in range(len(inputs)):
                for k in range(self.d_model):
                    attn_output[k] += attn[j] * values[j][k]
            outputs.append(self.matmul_vector(self.W_o, attn_output))
        # Mean-pool over positions, then project to per-token sigmoid scores.
        avg_output = [sum(x[k] for x in outputs) / len(outputs) for k in range(self.d_model)]
        token_scores = [
            sum(avg_output[k] * self.proj_weights[i][k] for k in range(self.d_model)) + self.proj_bias[i]
            for i in range(self.num_tokens)
        ]
        return [1 / (1 + math.exp(-score)) for score in token_scores]
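

# Vocabulary and one-hot training data: input i encodes words[i] and target i
# encodes words[i + 1], so the models are trained and probed as next-word
# predictors.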
unique_words = list(set(words))
word_to_index = {word: i for i, word in enumerate(unique_words)}
index_to_word = {i: word for word, i in word_to_index.items()}

input_data = [[0] * len(unique_words) for _ in range(len(words) - 2)]
for i in range(len(words) - 2):
    input_data[i][word_to_index[words[i]]] = 1

output_data = [[0] * len(unique_words) for _ in range(len(words) - 2)]
for i in range(len(words) - 2):
    output_data[i][word_to_index[words[i + 1]]] = 1
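

# Golden-ratio-scaled layer sizes, then train the feedforward network.
# round(PHI) == 2, so progress is printed every second epoch.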
input_size = len(unique_words)
hidden_size1 = round(PHI * input_size)
hidden_size2 = round(PHI * hidden_size1)
output_size = len(unique_words)

nn = NeuralNetwork(input_size, hidden_size1, hidden_size2, output_size)
epochs = round(100 * PHI)
for epoch in range(epochs):
    for i in range(len(input_data)):
        nn.forward(input_data[i])
        nn.backward(input_data[i], output_data[i], learning_rate=0.1)
    if (epoch + 1) % round(PHI) == 0:
        print("Feedforward NN Epoch {}/{}".format(epoch + 1, epochs))

# Exercise the remaining models: the GA evolves for a number of generations;
# the others run a single forward pass with untrained random weights.
rnn = RecurrentNeuralNetwork(input_size, hidden_size1, output_size)
rnn_output = rnn.forward(input_data[0])
print("Recurrent NN Output:", rnn_output)

kernel_size1 = round(3 * PHI)
kernel_size2 = round(2 * PHI)
cnn = ConvolutionalNeuralNetwork(input_length=round(10 * PHI), kernel_size1=kernel_size1,
                                 kernel_size2=kernel_size2, output_size=output_size)
sample_input = [random.random() for _ in range(round(10 * PHI))]
cnn_output = cnn.forward(sample_input)
print("Convolutional NN Output:", cnn_output)

population_size = round(10 * PHI)
ga = GeneticAlgorithm(population_size, round(PHI * 5))
best_individual, best_fitness = ga.evolve(round(50 * PHI))
print("Genetic Algorithm Best Individual:", best_individual, "Fitness:", best_fitness)

lstm_hidden_size = round(PHI * input_size)
lstm = LSTM(input_size, lstm_hidden_size, output_size)
lstm_output = lstm.forward(input_data[0])
print("LSTM Output:", lstm_output)

transformer_d_model = round(PHI * input_size)
transformer = Transformer(transformer_d_model, output_size)
# One basis vector per vocabulary word; only the first d_model of them can
# carry a 1 since the vectors are d_model wide.
transformer_input = []
for i in range(len(unique_words)):
    vec = [0] * transformer_d_model
    if i < transformer_d_model:
        vec[i] = 1
    transformer_input.append(vec)
transformer_output = transformer.forward(transformer_input)
print("Transformer Output:", transformer_output)
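

# Ensemble generation: average the four models' next-word scores, take the
# argmax as the predicted word, and prefix it with five trigram-generated
# segments whose target lengths grow by a factor of PHI each round.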
def advanced_text_generation(input_vector):
    ff_output = nn.forward(input_vector)
    rnn_out = rnn.forward(input_vector)
    lstm_out = lstm.forward(input_vector)
    # The transformer sees the vector as a single token; matmul_vector uses
    # only its first len(input_vector) columns, so the missing trailing
    # dimensions act as zeros.
    transformer_out = transformer.forward([input_vector])
    combined = [
        (ff_output[i] + rnn_out[i] + lstm_out[i] + transformer_out[i]) / 4
        for i in range(len(ff_output))
    ]
    predicted_index = combined.index(max(combined))
    predicted_word = index_to_word[predicted_index]
    long_text = ""
    current_length = round(10 * PHI)
    for _ in range(5):
        segment = generate_text(current_length)
        long_text += segment + " "
        current_length = round(current_length * PHI)
    return long_text + predicted_word
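

# Interactive loop: one-hot encode whichever of the user's words are in the
# vocabulary, generate a response, and repeat until the user types "exit".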
def chat():
    print("FiPhi-NeuralMark ACC Initialized")
    while True:
        user_input = input("\nYou: ")
        if user_input.lower() == "exit":
            print("Goodbye!")
            break
        input_vector = [0] * len(unique_words)
        for word in user_input.split():
            if word in word_to_index:
                input_vector[word_to_index[word]] = 1
        response = advanced_text_generation(input_vector)
        print("FiPhi-NeuralMark:", response)


if __name__ == "__main__":
    chat()