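"""Streamlit demo: evolving a population of small Keras classifiers with a
simple genetic algorithm (train-and-select, weight-averaging crossover, and
Gaussian-noise mutation) across synthetic scikit-learn classification tasks."""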
import streamlit as st
import numpy as np
import tensorflow as tf
from tensorflow import keras
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import random
# Define a function to generate a dataset
def generate_dataset(task_id):
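    """Create a reproducible synthetic binary classification task for the given task_id."""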
    X, y = make_classification(n_samples=100, n_features=10, n_informative=5, n_redundant=3, n_repeated=2, random_state=task_id)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=task_id)
    return X_train, X_test, y_train, y_test
# Define a neural network class
class Net(keras.Model):
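    """A small fully connected classifier; the final layer outputs 2-class logits."""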
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = keras.layers.Dense(20, activation='relu', input_shape=(10,))
        self.fc2 = keras.layers.Dense(10, activation='relu')
        self.fc3 = keras.layers.Dense(2)

    def call(self, x):
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
# Define a genetic algorithm class
class GeneticAlgorithm:
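    """Evolves a population of Net models via selection, crossover, and mutation."""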
    def __init__(self, population_size):
        self.population_size = population_size
        self.population = [Net() for _ in range(population_size)]

    def selection(self, task_id):
        # Train and score every network on the task, then keep the fittest half
        X_train, X_test, y_train, y_test = generate_dataset(task_id)
        fitness = []
        for net in self.population:
            net.compile(optimizer='adam',
                        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                        metrics=['accuracy'])
            net.fit(X_train, y_train, epochs=10, verbose=0)
            loss, accuracy = net.evaluate(X_test, y_test, verbose=0)
            fitness.append(accuracy)
        self.population = [self.population[i] for i in np.argsort(fitness)[-self.population_size // 2:]]
    def crossover(self):
        offspring = []
        for _ in range(self.population_size // 2):
            parent1, parent2 = random.sample(self.population, 2)
            child = Net()
            # Run a dummy forward pass so the child's layers are built before set_weights
            child(np.zeros((1, 10), dtype=np.float32))
            # Average the weights of the two parents, layer by layer
            for p1_layer, p2_layer, child_layer in zip((parent1.fc1, parent1.fc2, parent1.fc3),
                                                       (parent2.fc1, parent2.fc2, parent2.fc3),
                                                       (child.fc1, child.fc2, child.fc3)):
                child_weights = [(w1 + w2) / 2 for w1, w2 in zip(p1_layer.get_weights(), p2_layer.get_weights())]
                child_layer.set_weights(child_weights)
            offspring.append(child)
        self.population += offspring
    def mutation(self):
        # With probability 0.1, perturb every layer of a network with small Gaussian noise
        for net in self.population:
            if random.random() < 0.1:
                for layer in (net.fc1, net.fc2, net.fc3):
                    new_weights = [w + np.random.randn(*w.shape) * 0.1 for w in layer.get_weights()]
                    layer.set_weights(new_weights)
# Streamlit app
st.title("Evolution of Sub-Models")
# Parameters
st.sidebar.header("Parameters")
population_size = st.sidebar.slider("Population size", 10, 100, 50)
num_tasks = st.sidebar.slider("Number of tasks", 1, 10, 5)
num_generations = st.sidebar.slider("Number of generations", 1, 100, 10)
# Run the evolution
if st.button("Run evolution"):
    ga = GeneticAlgorithm(population_size)
    for generation in range(num_generations):
        # One selection/crossover/mutation cycle per task keeps the population at full size
        for task_id in range(num_tasks):
            ga.selection(task_id)
            ga.crossover()
            ga.mutation()
        st.write(f"Generation {generation+1} complete")

    # Evaluate the final population
    final_accuracy = []
    for task_id in range(num_tasks):
        X_train, X_test, y_train, y_test = generate_dataset(task_id)
        accuracy = []
        for net in ga.population:
            net.compile(optimizer='adam',
                        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                        metrics=['accuracy'])
            net.fit(X_train, y_train, epochs=10, verbose=0)
            loss, acc = net.evaluate(X_test, y_test, verbose=0)
            accuracy.append(acc)
        final_accuracy.append(np.mean(accuracy))
    st.write(f"Final accuracy: {np.mean(final_accuracy):.3f}")