import streamlit as st
import numpy as np
import torch
import torch.nn as nn
import random
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Define a function to generate a dataset
def generate_dataset(task_id):
    X, y = make_classification(n_samples=100, n_features=10, n_informative=5,
                               n_redundant=3, n_repeated=2, random_state=task_id)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=task_id)
    return X_train, X_test, y_train, y_test
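
# With the defaults above, each task is a binary classification problem with 10
# features, split 80/20: X_train is (80, 10) and X_test is (20, 10).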

# Define a neural network class
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(10, 20)
        self.fc2 = nn.Linear(20, 10)
        self.fc3 = nn.Linear(10, 2)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x
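
# A small fully connected classifier: 10 inputs -> 20 -> 10 -> 2 output logits,
# with ReLU activations; nn.CrossEntropyLoss consumes the raw logits directly.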

# Define a genetic algorithm class
class GeneticAlgorithm:
    def __init__(self, population_size):
        self.population_size = population_size
        self.population = [Net() for _ in range(population_size)]

    def selection(self, task_id):
        # Train every network briefly on the task, then keep the fittest half.
        X_train, X_test, y_train, y_test = generate_dataset(task_id)
        fitness = []
        for net in self.population:
            criterion = nn.CrossEntropyLoss()
            optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
            for epoch in range(10):
                optimizer.zero_grad()
                inputs = torch.tensor(X_train, dtype=torch.float32)
                labels = torch.tensor(y_train, dtype=torch.long)
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
            # Fitness is held-out test accuracy; no gradients are needed here.
            with torch.no_grad():
                inputs = torch.tensor(X_test, dtype=torch.float32)
                labels = torch.tensor(y_test, dtype=torch.long)
                outputs = net(inputs)
                _, predicted = torch.max(outputs, 1)
            accuracy = accuracy_score(labels.numpy(), predicted.numpy())
            fitness.append(accuracy)
        # Truncation selection: keep the fittest half of the population.
        self.population = [self.population[i] for i in np.argsort(fitness)[-self.population_size // 2:]]

    def crossover(self):
        # Produce children by averaging the parents' layer weights
        # (biases keep the child's fresh random initialization).
        offspring = []
        for _ in range(self.population_size // 2):
            parent1, parent2 = random.sample(self.population, 2)
            child = Net()
            child.fc1.weight.data = (parent1.fc1.weight.data + parent2.fc1.weight.data) / 2
            child.fc2.weight.data = (parent1.fc2.weight.data + parent2.fc2.weight.data) / 2
            child.fc3.weight.data = (parent1.fc3.weight.data + parent2.fc3.weight.data) / 2
            offspring.append(child)
        self.population += offspring

    def mutation(self):
        # With 10% probability per network, add small Gaussian noise to every layer's weights.
        for net in self.population:
            if random.random() < 0.1:
                net.fc1.weight.data += torch.randn_like(net.fc1.weight.data) * 0.1
                net.fc2.weight.data += torch.randn_like(net.fc2.weight.data) * 0.1
                net.fc3.weight.data += torch.randn_like(net.fc3.weight.data) * 0.1
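
# Illustrative sketch (comments only, not executed by the app): one full GA step
# on a hypothetical task, using the classes defined above.
#
#   ga = GeneticAlgorithm(population_size=10)
#   ga.selection(task_id=0)  # train briefly, rank by test accuracy, keep the top half
#   ga.crossover()           # restore the population with weight-averaged children
#   ga.mutation()            # add Gaussian noise to ~10% of the networks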

# Streamlit app
st.title("Evolution of Sub-Models")

# Parameters
st.sidebar.header("Parameters")
population_size = st.sidebar.slider("Population size", 10, 100, 50)
num_tasks = st.sidebar.slider("Number of tasks", 1, 10, 5)
num_generations = st.sidebar.slider("Number of generations", 1, 100, 10)

# Run the evolution
if st.button("Run evolution"):
    ga = GeneticAlgorithm(population_size)
    for generation in range(num_generations):
        for task_id in range(num_tasks):
            ga.selection(task_id)   # train, rank, and keep the fittest half
            ga.crossover()          # refill the population with averaged children
            ga.mutation()           # perturb some networks with Gaussian noise
        st.write(f"Generation {generation+1} complete")
    # Evaluate the final population
    final_accuracy = []
    for task_id in range(num_tasks):
        X_train, X_test, y_train, y_test = generate_dataset(task_id)
        accuracy = []
        for net in ga.population:
            criterion = nn.CrossEntropyLoss()
            optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
            for epoch in range(10):
                optimizer.zero_grad()
                inputs = torch.tensor(X_train, dtype=torch.float32)
                labels = torch.tensor(y_train, dtype=torch.long)
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
            # Score each fine-tuned network on the held-out split, as in selection().
            with torch.no_grad():
                inputs = torch.tensor(X_test, dtype=torch.float32)
                labels = torch.tensor(y_test, dtype=torch.long)
                outputs = net(inputs)
                _, predicted = torch.max(outputs, 1)
            accuracy.append(accuracy_score(labels.numpy(), predicted.numpy()))
        final_accuracy.append(np.mean(accuracy))
    st.write(f"Final mean accuracy per task: {final_accuracy}")