import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
import statsmodels.api as sm
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from scipy.optimize import minimize
import plotly.express as px
from scipy.stats import t, f
import gradio as gr
class RSM_BoxBehnken:
    def __init__(self, data, x1_name, x2_name, x3_name, y_name, x1_levels, x2_levels, x3_levels):
        """
        Initializes the class with the Box-Behnken design data.

        Args:
            data (pd.DataFrame): DataFrame with the experimental data.
            x1_name (str): Name of the first independent variable.
            x2_name (str): Name of the second independent variable.
            x3_name (str): Name of the third independent variable.
            y_name (str): Name of the dependent variable.
            x1_levels (list): Levels of the first independent variable.
            x2_levels (list): Levels of the second independent variable.
            x3_levels (list): Levels of the third independent variable.
        """
        self.data = data.copy()
        self.model = None
        self.model_simplified = None
        self.optimized_results = None
        self.optimal_levels = None
        self.x1_name = x1_name
        self.x2_name = x2_name
        self.x3_name = x3_name
        self.y_name = y_name
        # Natural (original-unit) levels of each factor
        self.x1_levels = x1_levels
        self.x2_levels = x2_levels
        self.x3_levels = x3_levels
    def get_levels(self, variable_name):
        """
        Returns the natural levels of a specific variable.

        Args:
            variable_name (str): Name of the variable.

        Returns:
            list: Levels of the variable.
        """
        if variable_name == self.x1_name:
            return self.x1_levels
        elif variable_name == self.x2_name:
            return self.x2_levels
        elif variable_name == self.x3_name:
            return self.x3_levels
        else:
            raise ValueError(f"Variable desconocida: {variable_name}")
    def fit_model(self):
        """
        Fits the full second-order model to the data.
        """
        formula = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + {self.x3_name} + ' \
                  f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2) + ' \
                  f'{self.x1_name}:{self.x2_name} + {self.x1_name}:{self.x3_name} + {self.x2_name}:{self.x3_name}'
        self.model = smf.ols(formula, data=self.data).fit()
        print("Modelo Completo:")
        print(self.model.summary())
        return self.model, self.pareto_chart(self.model, "Pareto - Modelo Completo")
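    # The full second-order (quadratic) response-surface model fitted above is
    #   y = b0 + b1*x1 + b2*x2 + b3*x3
    #         + b11*x1^2 + b22*x2^2 + b33*x3^2
    #         + b12*x1*x2 + b13*x1*x3 + b23*x2*x3
    # where x1, x2, x3 are the coded factor levels (-1, 0, +1) of the Box-Behnken design.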
    def fit_simplified_model(self):
        """
        Fits a simplified second-order model, dropping the non-significant terms.
        """
        formula = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + ' \
                  f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2)'
        self.model_simplified = smf.ols(formula, data=self.data).fit()
        print("\nModelo Simplificado:")
        print(self.model_simplified.summary())
        return self.model_simplified, self.pareto_chart(self.model_simplified, "Pareto - Modelo Simplificado")
    def optimize(self, method='Nelder-Mead'):
        """
        Finds the factor levels that maximize the response, using the simplified model.

        Args:
            method (str): Optimization method to use (default: 'Nelder-Mead').
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return

        def objective_function(x):
            # predict() returns a length-1 Series; take the scalar so minimize() receives a float
            return -self.model_simplified.predict(
                pd.DataFrame({self.x1_name: [x[0]], self.x2_name: [x[1]], self.x3_name: [x[2]]})
            ).values[0]

        bounds = [(-1, 1), (-1, 1), (-1, 1)]
        x0 = [0, 0, 0]
        self.optimized_results = minimize(objective_function, x0, method=method, bounds=bounds)
        self.optimal_levels = self.optimized_results.x

        # Convert the optimal levels from coded to natural units
        optimal_levels_natural = [
            self.coded_to_natural(self.optimal_levels[0], self.x1_name),
            self.coded_to_natural(self.optimal_levels[1], self.x2_name),
            self.coded_to_natural(self.optimal_levels[2], self.x3_name)
        ]
        # Build the optimization table
        optimization_table = pd.DataFrame({
            'Variable': [self.x1_name, self.x2_name, self.x3_name],
            'Nivel Óptimo (Natural)': optimal_levels_natural,
            'Nivel Óptimo (Codificado)': self.optimal_levels
        })
        return optimization_table
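    # Note on optimize(): the response is maximized by minimizing -y_hat over the coded cube
    # [-1, 1]^3, i.e. the search is restricted to the experimental region of the Box-Behnken
    # design; the optimum is then mapped back to natural units with coded_to_natural() for
    # reporting in the optimization table.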
    def plot_rsm_individual(self, fixed_variable, fixed_level):
        """
        Generates an individual response-surface (RSM) plot for a specific configuration.

        Args:
            fixed_variable (str): Name of the variable to hold fixed.
            fixed_level (float): Level at which the variable is fixed (in natural units).

        Returns:
            go.Figure: Plotly figure object.
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None

        # Determine the two varying variables and their natural levels
        varying_variables = [var for var in [self.x1_name, self.x2_name, self.x3_name] if var != fixed_variable]
        x_natural_levels = self.get_levels(varying_variables[0])
        y_natural_levels = self.get_levels(varying_variables[1])

        # Build a grid of points for the varying variables (in natural units)
        x_range_natural = np.linspace(x_natural_levels[0], x_natural_levels[-1], 100)
        y_range_natural = np.linspace(y_natural_levels[0], y_natural_levels[-1], 100)
        x_grid_natural, y_grid_natural = np.meshgrid(x_range_natural, y_range_natural)

        # Convert the grid from natural to coded units
        x_grid_coded = self.natural_to_coded(x_grid_natural, varying_variables[0])
        y_grid_coded = self.natural_to_coded(y_grid_natural, varying_variables[1])

        # Build a prediction DataFrame with coded variables
        prediction_data = pd.DataFrame({
            varying_variables[0]: x_grid_coded.flatten(),
            varying_variables[1]: y_grid_coded.flatten(),
        })
        prediction_data[fixed_variable] = self.natural_to_coded(fixed_level, fixed_variable)

        # Compute the predicted values
        z_pred = self.model_simplified.predict(prediction_data).values.reshape(x_grid_coded.shape)
        # Select the experimental points that lie on this slice:
        # 1. Filter by the fixed variable's level (in coded units)
        fixed_level_coded = self.natural_to_coded(fixed_level, fixed_variable)
        subset_data = self.data[np.isclose(self.data[fixed_variable], fixed_level_coded)]
        # 2. Keep only rows whose varying variables sit at valid design levels
        valid_levels = [-1, 0, 1]
        experiments_data = subset_data[
            subset_data[varying_variables[0]].isin(valid_levels) &
            subset_data[varying_variables[1]].isin(valid_levels)
        ]

        # Convert the experiment coordinates to natural units
        experiments_x_natural = experiments_data[varying_variables[0]].apply(lambda x: self.coded_to_natural(x, varying_variables[0]))
        experiments_y_natural = experiments_data[varying_variables[1]].apply(lambda x: self.coded_to_natural(x, varying_variables[1]))
        # Create the surface plot with natural units on the axes and some transparency
        fig = go.Figure(data=[go.Surface(z=z_pred, x=x_grid_natural, y=y_grid_natural, colorscale='Viridis', opacity=0.7, showscale=True)])

        # --- Add a wireframe grid on top of the surface ---
        # Lines along the x direction
        for i in range(x_grid_natural.shape[0]):
            fig.add_trace(go.Scatter3d(
                x=x_grid_natural[i, :],
                y=y_grid_natural[i, :],
                z=z_pred[i, :],
                mode='lines',
                line=dict(color='gray', width=2),
                showlegend=False,
                hoverinfo='skip'
            ))
        # Lines along the y direction
        for j in range(x_grid_natural.shape[1]):
            fig.add_trace(go.Scatter3d(
                x=x_grid_natural[:, j],
                y=y_grid_natural[:, j],
                z=z_pred[:, j],
                mode='lines',
                line=dict(color='gray', width=2),
                showlegend=False,
                hoverinfo='skip'
            ))
        # --- End of wireframe grid ---

        # Add the experimental points on the response surface, with distinct colors and value labels
        colors = ['red', 'blue', 'green', 'purple', 'orange', 'yellow', 'cyan', 'magenta']
        point_labels = []
        for i, row in experiments_data.iterrows():
            point_labels.append(f"{row[self.y_name]:.2f}")
        fig.add_trace(go.Scatter3d(
            x=experiments_x_natural,
            y=experiments_y_natural,
            z=experiments_data[self.y_name],
            mode='markers+text',
            marker=dict(size=4, color=colors[:len(experiments_x_natural)]),  # one color per experimental point
            text=point_labels,  # response value shown next to each point
            textposition='top center',
            name='Experimentos'
        ))
        # Add axis titles and the plot title, in natural units
        fig.update_layout(
            scene=dict(
                xaxis_title=varying_variables[0] + " (g/L)",
                yaxis_title=varying_variables[1] + " (g/L)",
                zaxis_title=self.y_name,
                # Optionally keep grid lines on the coordinate planes:
                # xaxis=dict(showgrid=True, gridwidth=1, gridcolor='lightgray'),
                # yaxis=dict(showgrid=True, gridwidth=1, gridcolor='lightgray'),
                # zaxis=dict(showgrid=True, gridwidth=1, gridcolor='lightgray')
            ),
            title=f"{self.y_name} vs {varying_variables[0]} y {varying_variables[1]}<br><sup>{fixed_variable} fijo en {fixed_level:.2f} (g/L) (Modelo Simplificado)</sup>",
            height=800,
            width=1000,
            showlegend=True
        )
        return fig
    def generate_all_plots(self):
        """
        Generates all RSM plots, iterating over each fixed variable and its levels, using the simplified model.
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return

        # Natural levels to plot for each factor
        levels_to_plot_natural = {
            self.x1_name: self.x1_levels,
            self.x2_name: self.x2_levels,
            self.x3_name: self.x3_levels
        }
        # Generate and show the individual plots
        for fixed_variable in [self.x1_name, self.x2_name, self.x3_name]:
            for level in levels_to_plot_natural[fixed_variable]:
                fig = self.plot_rsm_individual(fixed_variable, level)
                if fig is not None:
                    fig.show()
    def coded_to_natural(self, coded_value, variable_name):
        """Converts a coded value to its natural (original-unit) value."""
        levels = self.get_levels(variable_name)
        return levels[0] + (coded_value + 1) * (levels[-1] - levels[0]) / 2

    def natural_to_coded(self, natural_value, variable_name):
        """Converts a natural value to its coded value."""
        levels = self.get_levels(variable_name)
        return -1 + 2 * (natural_value - levels[0]) / (levels[-1] - levels[0])
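    # The two conversions above are inverse linear maps:
    #   coded   = 2 * (natural - min) / (max - min) - 1
    #   natural = min + (coded + 1) * (max - min) / 2
    # so the lowest listed level maps to -1 and the highest to +1, with linear interpolation
    # in between. For example, with the default Glucosa levels [1, 3.5, 5.5],
    # natural_to_coded(1, 'Glucosa') gives -1 and natural_to_coded(5.5, 'Glucosa') gives +1;
    # the listed middle level maps exactly to 0 only when it is the midpoint of the range.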
    def pareto_chart(self, model, title):
        """
        Generates a Pareto chart of the standardized effects of a model,
        including the significance threshold line.

        Args:
            model: Fitted statsmodels model.
            title (str): Plot title.
        """
        # Standardized effects: absolute t-values of the coefficients (excluding the intercept)
        tvalues = model.tvalues[1:]
        abs_tvalues = np.abs(tvalues)
        sorted_idx = np.argsort(abs_tvalues.values)[::-1]
        sorted_tvalues = abs_tvalues.iloc[sorted_idx]
        sorted_names = abs_tvalues.index[sorted_idx]

        # Critical t-value for the significance line
        alpha = 0.05  # significance level
        dof = model.df_resid  # residual degrees of freedom
        t_critical = t.ppf(1 - alpha / 2, dof)

        # Build the Pareto chart
        fig = px.bar(
            x=sorted_tvalues,
            y=sorted_names,
            orientation='h',
            labels={'x': 'Efecto Estandarizado', 'y': 'Término'},
            title=title
        )
        fig.update_yaxes(autorange="reversed")

        # Add the significance line
        fig.add_vline(x=t_critical, line_dash="dot",
                      annotation_text=f"t crítico = {t_critical:.2f}",
                      annotation_position="bottom right")
        return fig
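    # In the Pareto chart, each bar is |t| = |coefficient / standard error| for one model term,
    # and the dotted vertical line marks the two-sided critical value t(alpha/2, df_resid);
    # bars extending past the line correspond to terms significant at alpha = 0.05.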
    def get_simplified_equation(self):
        """
        Builds and returns the simplified model equation as a string.
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None

        coefficients = self.model_simplified.params
        equation = f"{self.y_name} = {coefficients['Intercept']:.4f}"
        for term, coef in coefficients.items():
            if term != 'Intercept':
                if term == f'{self.x1_name}':
                    equation += f" + {coef:.4f}*{self.x1_name}"
                elif term == f'{self.x2_name}':
                    equation += f" + {coef:.4f}*{self.x2_name}"
                elif term == f'{self.x3_name}':
                    equation += f" + {coef:.4f}*{self.x3_name}"
                elif term == f'I({self.x1_name} ** 2)':
                    equation += f" + {coef:.4f}*{self.x1_name}^2"
                elif term == f'I({self.x2_name} ** 2)':
                    equation += f" + {coef:.4f}*{self.x2_name}^2"
                elif term == f'I({self.x3_name} ** 2)':
                    equation += f" + {coef:.4f}*{self.x3_name}^2"
        return equation
    def generate_prediction_table(self):
        """
        Generates a table with the observed, predicted and residual values.
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None

        self.data['Predicho'] = self.model_simplified.predict(self.data)
        self.data['Residual'] = self.data[self.y_name] - self.data['Predicho']
        return self.data[[self.y_name, 'Predicho', 'Residual']]
    def calculate_contribution_percentage(self):
        """
        Calculates the percentage contribution of each factor to the variability of the response (AIA).
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None

        # ANOVA of the simplified model
        anova_table = sm.stats.anova_lm(self.model_simplified, typ=2)
        # Total sum of squares
        ss_total = anova_table['sum_sq'].sum()

        # Build the contribution table
        contribution_table = pd.DataFrame({
            'Factor': [],
            'Suma de Cuadrados': [],
            '% Contribución': []
        })
        # Percentage contribution of each factor
        for index, row in anova_table.iterrows():
            if index != 'Residual':
                factor_name = index
                if factor_name == f'I({self.x1_name} ** 2)':
                    factor_name = f'{self.x1_name}^2'
                elif factor_name == f'I({self.x2_name} ** 2)':
                    factor_name = f'{self.x2_name}^2'
                elif factor_name == f'I({self.x3_name} ** 2)':
                    factor_name = f'{self.x3_name}^2'
                ss_factor = row['sum_sq']
                contribution_percentage = (ss_factor / ss_total) * 100
                contribution_table = pd.concat([contribution_table, pd.DataFrame({
                    'Factor': [factor_name],
                    'Suma de Cuadrados': [ss_factor],
                    '% Contribución': [contribution_percentage]
                })], ignore_index=True)
        return contribution_table
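    # Contribution percentages above are computed as SS_factor / SS_total * 100, where SS_total
    # is taken as the sum of all entries in the type-II ANOVA 'sum_sq' column (factors + residual).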
    def calculate_detailed_anova(self):
        """
        Builds the detailed ANOVA table, decomposing the residual error into lack of fit and pure error.
        """
        if self.model_simplified is None:
            print("Error: Ajusta el modelo simplificado primero.")
            return None

        # --- Detailed ANOVA ---
        # 1. Fit a model containing only the first-order and quadratic terms
        formula_reduced = f'{self.y_name} ~ {self.x1_name} + {self.x2_name} + {self.x3_name} + ' \
                          f'I({self.x1_name}**2) + I({self.x2_name}**2) + I({self.x3_name}**2)'
        model_reduced = smf.ols(formula_reduced, data=self.data).fit()

        # 2. ANOVA of the reduced model (to obtain the regression sum of squares)
        anova_reduced = sm.stats.anova_lm(model_reduced, typ=2)

        # 3. Total sum of squares
        ss_total = np.sum((self.data[self.y_name] - self.data[self.y_name].mean())**2)
        # 4. Total degrees of freedom
        df_total = len(self.data) - 1

        # 5. Regression sum of squares
        ss_regression = anova_reduced['sum_sq'][:-1].sum()  # everything except 'Residual'
        # 6. Regression degrees of freedom
        df_regression = len(anova_reduced) - 1

        # 7. Residual sum of squares and degrees of freedom
        ss_residual = self.model_simplified.ssr
        df_residual = self.model_simplified.df_resid

        # 8. Pure-error sum of squares, computed from the replicated runs:
        #    sum over replicate groups of the squared deviations from the group mean
        replicas = self.data[self.data.duplicated(subset=[self.x1_name, self.x2_name, self.x3_name], keep=False)]
        replica_groups = replicas.groupby([self.x1_name, self.x2_name, self.x3_name])[self.y_name]
        ss_pure_error = replica_groups.apply(lambda g: ((g - g.mean())**2).sum()).sum()
        df_pure_error = len(replicas) - replica_groups.ngroups
        # 9. Lack-of-fit sum of squares and degrees of freedom
        ss_lack_of_fit = ss_residual - ss_pure_error
        df_lack_of_fit = df_residual - df_pure_error

        # 10. Mean squares
        ms_regression = ss_regression / df_regression
        ms_residual = ss_residual / df_residual
        ms_lack_of_fit = ss_lack_of_fit / df_lack_of_fit
        ms_pure_error = ss_pure_error / df_pure_error

        # 11. F statistic and p-value for the lack of fit
        f_lack_of_fit = ms_lack_of_fit / ms_pure_error
        p_lack_of_fit = 1 - f.cdf(f_lack_of_fit, df_lack_of_fit, df_pure_error)

        # 12. Build the detailed ANOVA table
        detailed_anova_table = pd.DataFrame({
            'Fuente de Variación': ['Regresión', 'Residual', 'Falta de Ajuste', 'Error Puro', 'Total'],
            'Suma de Cuadrados': [ss_regression, ss_residual, ss_lack_of_fit, ss_pure_error, ss_total],
            'Grados de Libertad': [df_regression, df_residual, df_lack_of_fit, df_pure_error, df_total],
            'Cuadrado Medio': [ms_regression, ms_residual, ms_lack_of_fit, ms_pure_error, np.nan],
            'F': [np.nan, np.nan, f_lack_of_fit, np.nan, np.nan],
            'Valor p': [np.nan, np.nan, p_lack_of_fit, np.nan, np.nan]
        })

        # Curvature: sum of squares and degrees of freedom of the three quadratic terms
        ss_curvature = (anova_reduced['sum_sq'][f'I({self.x1_name} ** 2)'] +
                        anova_reduced['sum_sq'][f'I({self.x2_name} ** 2)'] +
                        anova_reduced['sum_sq'][f'I({self.x3_name} ** 2)'])
        df_curvature = 3

        # Append the curvature row to the ANOVA table
        detailed_anova_table.loc[len(detailed_anova_table)] = ['Curvatura', ss_curvature, df_curvature, ss_curvature / df_curvature, np.nan, np.nan]
        # Reorder the rows so that curvature appears right after the regression row
        detailed_anova_table = detailed_anova_table.reindex([0, 5, 1, 2, 3, 4])
        # Reset the index so it is consecutive again
        detailed_anova_table = detailed_anova_table.reset_index(drop=True)
        return detailed_anova_table
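    # In calculate_detailed_anova() the residual error is split as
    #   SS_residual = SS_lack_of_fit + SS_pure_error
    # with SS_pure_error estimated from the replicated center points, and the lack of fit is
    # tested with F = MS_lack_of_fit / MS_pure_error on (df_lack_of_fit, df_pure_error) degrees
    # of freedom; a large p-value suggests the simplified model fits the data adequately.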
# --- Functions for the Gradio interface ---
def load_data(x1_name, x2_name, x3_name, y_name, x1_levels_str, x2_levels_str, x3_levels_str, data_str):
    """
    Loads the Box-Behnken design data from the text boxes and creates the RSM_BoxBehnken instance.

    Args:
        x1_name (str): Name of the first independent variable.
        x2_name (str): Name of the second independent variable.
        x3_name (str): Name of the third independent variable.
        y_name (str): Name of the dependent variable.
        x1_levels_str (str): Comma-separated levels of the first variable.
        x2_levels_str (str): Comma-separated levels of the second variable.
        x3_levels_str (str): Comma-separated levels of the third variable.
        data_str (str): Experimental data in CSV format (comma-separated).

    Returns:
        tuple: (pd.DataFrame, str, str, str, str, list, list, list, gr.update)
    """
    try:
        # Convert the level strings into lists of numbers
        x1_levels = [float(x.strip()) for x in x1_levels_str.split(',')]
        x2_levels = [float(x.strip()) for x in x2_levels_str.split(',')]
        x3_levels = [float(x.strip()) for x in x3_levels_str.split(',')]

        # Build a DataFrame from the data string
        data_list = [row.split(',') for row in data_str.strip().split('\n')]
        column_names = ['Exp.', x1_name, x2_name, x3_name, y_name]
        data = pd.DataFrame(data_list, columns=column_names)
        data = data.apply(pd.to_numeric, errors='coerce')  # convert to numeric

        # Validate that the DataFrame has the expected columns
        if not all(col in data.columns for col in column_names):
            raise ValueError("El formato de los datos no es correcto.")

        # Create the RSM_BoxBehnken instance
        global rsm
        rsm = RSM_BoxBehnken(data, x1_name, x2_name, x3_name, y_name, x1_levels, x2_levels, x3_levels)

        return data, x1_name, x2_name, x3_name, y_name, x1_levels, x2_levels, x3_levels, gr.update(visible=True)
    except Exception as e:
        # Return one value per output component (9) and report the error on the console
        print(f"Error al cargar los datos: {e}")
        return None, "", "", "", "", [], [], [], gr.update(visible=False)
def fit_and_optimize_model():
    if 'rsm' not in globals():
        print("Error: Carga los datos primero.")
        # Return one value per output component (9)
        return None, None, None, None, None, None, None, None, None

    model_completo, pareto_completo = rsm.fit_model()
    model_simplificado, pareto_simplificado = rsm.fit_simplified_model()
    optimization_table = rsm.optimize()
    equation = rsm.get_simplified_equation()
    prediction_table = rsm.generate_prediction_table()
    contribution_table = rsm.calculate_contribution_percentage()
    anova_table = rsm.calculate_detailed_anova()

    # Format the equation so it reads better as HTML
    equation_formatted = equation.replace(" + ", "<br>+ ").replace(" ** ", "^").replace("*", " × ")
    equation_formatted = f"### Ecuación del Modelo Simplificado:<br>{equation_formatted}"

    return (model_completo.summary().as_html(), pareto_completo,
            model_simplificado.summary().as_html(), pareto_simplificado,
            equation_formatted, optimization_table, prediction_table, contribution_table, anova_table)
def generate_rsm_plot(fixed_variable, fixed_level):
    if 'rsm' not in globals():
        print("Error: Carga los datos primero.")
        return None
    fig = rsm.plot_rsm_individual(fixed_variable, fixed_level)
    return fig
# --- Build the Gradio interface ---
with gr.Blocks() as demo:
    gr.Markdown("# Optimización de la producción de AIA usando RSM Box-Behnken")

    with gr.Row():
        with gr.Column():
            gr.Markdown("## Configuración del Diseño")
            x1_name_input = gr.Textbox(label="Nombre de la Variable X1 (ej. Glucosa)", value="Glucosa")
            x2_name_input = gr.Textbox(label="Nombre de la Variable X2 (ej. Extracto de Levadura)", value="Extracto_de_Levadura")
            x3_name_input = gr.Textbox(label="Nombre de la Variable X3 (ej. Triptófano)", value="Triptofano")
            y_name_input = gr.Textbox(label="Nombre de la Variable Dependiente (ej. AIA (ppm))", value="AIA_ppm")
            x1_levels_input = gr.Textbox(label="Niveles de X1 (separados por comas)", value="1, 3.5, 5.5")
            x2_levels_input = gr.Textbox(label="Niveles de X2 (separados por comas)", value="0.03, 0.2, 0.3")
            x3_levels_input = gr.Textbox(label="Niveles de X3 (separados por comas)", value="0.4, 0.65, 0.9")
            data_input = gr.Textbox(label="Datos del Experimento (formato CSV)", lines=5, value="""1,-1,-1,0,166.594
2,1,-1,0,177.557
3,-1,1,0,127.261
4,1,1,0,147.573
5,-1,0,-1,188.883
6,1,0,-1,224.527
7,-1,0,1,190.238
8,1,0,1,226.483
9,0,-1,-1,195.550
10,0,1,-1,149.493
11,0,-1,1,187.683
12,0,1,1,148.621
13,0,0,0,278.951
14,0,0,0,297.238
15,0,0,0,280.896""")
            load_button = gr.Button("Cargar Datos")

        with gr.Column():
            gr.Markdown("## Datos Cargados")
            data_output = gr.Dataframe(label="Tabla de Datos")

    # The analysis section only becomes visible once the data has been loaded
    with gr.Row(visible=False) as analysis_row:
        with gr.Column():
            fit_button = gr.Button("Ajustar Modelo y Optimizar")
            gr.Markdown("**Modelo Completo**")
            model_completo_output = gr.HTML()
            pareto_completo_output = gr.Plot()
            gr.Markdown("**Modelo Simplificado**")
            model_simplificado_output = gr.HTML()
            pareto_simplificado_output = gr.Plot()
            equation_output = gr.HTML()
            optimization_table_output = gr.Dataframe(label="Tabla de Optimización")
            prediction_table_output = gr.Dataframe(label="Tabla de Predicciones")
            contribution_table_output = gr.Dataframe(label="Tabla de % de Contribución")
            anova_table_output = gr.Dataframe(label="Tabla ANOVA Detallada")

        with gr.Column():
            gr.Markdown("## Generar Gráficos de Superficie de Respuesta")
            fixed_variable_input = gr.Dropdown(label="Variable Fija", choices=["Glucosa", "Extracto_de_Levadura", "Triptofano"], value="Glucosa")
            fixed_level_input = gr.Slider(label="Nivel de la Variable Fija (unidades naturales)", minimum=0, maximum=1, step=0.01, value=0.5)
            plot_button = gr.Button("Generar Gráfico")
            rsm_plot_output = gr.Plot()
    load_button.click(
        load_data,
        inputs=[x1_name_input, x2_name_input, x3_name_input, y_name_input, x1_levels_input, x2_levels_input, x3_levels_input, data_input],
        outputs=[data_output, x1_name_input, x2_name_input, x3_name_input, y_name_input, x1_levels_input, x2_levels_input, x3_levels_input, analysis_row]
    )
    fit_button.click(
        fit_and_optimize_model,
        outputs=[model_completo_output, pareto_completo_output, model_simplificado_output, pareto_simplificado_output,
                 equation_output, optimization_table_output, prediction_table_output, contribution_table_output, anova_table_output]
    )
    plot_button.click(generate_rsm_plot, inputs=[fixed_variable_input, fixed_level_input], outputs=[rsm_plot_output])
    # Usage instructions
    gr.Markdown("## Ejemplo de uso")
    gr.Markdown("1. Introduce los nombres de las variables y sus niveles en las cajas de texto correspondientes.")
    gr.Markdown("2. Copia y pega los datos del experimento en la caja de texto 'Datos del Experimento'.")
    gr.Markdown("3. Haz clic en 'Cargar Datos' para cargar los datos en la tabla.")
    gr.Markdown("4. Haz clic en 'Ajustar Modelo y Optimizar' para ajustar el modelo y encontrar los niveles óptimos de los factores.")
    gr.Markdown("5. Selecciona una variable fija y su nivel en los controles deslizantes.")
    gr.Markdown("6. Haz clic en 'Generar Gráfico' para generar un gráfico de superficie de respuesta.")

demo.launch()
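
# Programmatic use without the Gradio UI (a minimal sketch, assuming a DataFrame `df` with the
# same column layout as the default data above: 'Exp.', 'Glucosa', 'Extracto_de_Levadura',
# 'Triptofano', 'AIA_ppm', with the three factors already in coded -1/0/+1 units):
#
#   rsm = RSM_BoxBehnken(df, 'Glucosa', 'Extracto_de_Levadura', 'Triptofano', 'AIA_ppm',
#                        [1, 3.5, 5.5], [0.03, 0.2, 0.3], [0.4, 0.65, 0.9])
#   rsm.fit_model()                      # full quadratic model + Pareto chart
#   rsm.fit_simplified_model()           # reduced model used by the rest of the analysis
#   print(rsm.optimize())                # optimal levels, coded and natural
#   print(rsm.calculate_detailed_anova())
#   rsm.generate_all_plots()             # one response surface per fixed factor level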