#!/usr/bin/env python3
import os
import base64
import streamlit as st
import csv
import time
from dataclasses import dataclass
import zipfile
import logging
from streamlit.components.v1 import html
from PIL import Image

# Logging setup
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
log_records = []

class LogCaptureHandler(logging.Handler):
    """Collects log records in memory so the UI can display them."""
    def emit(self, record):
        log_records.append(record)

logger.addHandler(LogCaptureHandler())

st.set_page_config(page_title="SFT Tiny Titans 🚀", page_icon="🤖", layout="wide", initial_sidebar_state="expanded")

# Model Configurations
@dataclass
class ModelConfig:
    name: str
    base_model: str
    model_type: str = "causal_lm"

    @property
    def model_path(self):
        return f"models/{self.name}"

@dataclass
class DiffusionConfig:
    name: str
    base_model: str

    @property
    def model_path(self):
        return f"diffusion_models/{self.name}"

# Lazy-loaded Builders
class ModelBuilder:
    def __init__(self):
        self.config = None
        self.model = None
        self.tokenizer = None

    def load_model(self, model_path: str, config: ModelConfig):
        try:
            from transformers import AutoModelForCausalLM, AutoTokenizer
            import torch
            logger.info(f"Loading NLP model: {model_path}")
            self.model = AutoModelForCausalLM.from_pretrained(model_path)
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token
            self.config = config
            self.model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
            logger.info("NLP model loaded successfully")
        except Exception as e:
            logger.error(f"Error loading NLP model: {str(e)}")
            raise

    def fine_tune(self, csv_path):
        try:
            from torch.utils.data import Dataset, DataLoader
            import torch
            logger.info(f"Starting NLP fine-tuning with {csv_path}")

            class SFTDataset(Dataset):
                def __init__(self, data, tokenizer):
                    self.data = data
                    self.tokenizer = tokenizer

                def __len__(self):
                    return len(self.data)

                def __getitem__(self, idx):
                    prompt = self.data[idx]["prompt"]
                    response = self.data[idx]["response"]
                    inputs = self.tokenizer(f"{prompt} {response}", return_tensors="pt", padding="max_length", max_length=128, truncation=True)
                    labels = inputs["input_ids"].clone()
                    # Mask the prompt tokens so loss is computed on the response only.
                    # Without return_tensors, the tokenizer returns a flat list of ids.
                    prompt_len = len(self.tokenizer(prompt)["input_ids"])
                    labels[0, :prompt_len] = -100
                    return {"input_ids": inputs["input_ids"][0], "attention_mask": inputs["attention_mask"][0], "labels": labels[0]}

            data = []
            with open(csv_path, "r") as f:
                reader = csv.DictReader(f)
                for row in reader:
                    data.append({"prompt": row["prompt"], "response": row["response"]})

            dataset = SFTDataset(data, self.tokenizer)
            dataloader = DataLoader(dataset, batch_size=2)
            optimizer = torch.optim.AdamW(self.model.parameters(), lr=2e-5)
            self.model.train()
            for _ in range(1):  # single epoch
                for batch in dataloader:
                    optimizer.zero_grad()
                    outputs = self.model(**{k: v.to(self.model.device) for k, v in batch.items()})
                    outputs.loss.backward()
                    optimizer.step()
            logger.info("NLP fine-tuning completed")
        except Exception as e:
            logger.error(f"Error in NLP fine-tuning: {str(e)}")
            raise

    def evaluate(self, prompt: str):
        try:
            import torch
            logger.info(f"Evaluating NLP with prompt: {prompt}")
            self.model.eval()
            with torch.no_grad():
                inputs = self.tokenizer(prompt, return_tensors="pt", max_length=128, truncation=True).to(self.model.device)
                outputs = self.model.generate(**inputs, max_new_tokens=50)
                result = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
            logger.info(f"NLP evaluation result: {result}")
            return result
        except Exception as e:
            logger.error(f"Error in NLP evaluation: {str(e)}")
            raise

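# Usage sketch (comments only, nothing here runs): how ModelBuilder is meant to
# be driven end to end. "distilgpt2" and "sft_data.csv" are illustrative
# placeholders, not assets shipped with this app; the CSV needs "prompt" and
# "response" columns, matching what fine_tune() reads above.
#
#   builder = ModelBuilder()
#   builder.load_model("distilgpt2", ModelConfig(name="demo", base_model="distilgpt2"))
#   builder.fine_tune("sft_data.csv")
#   print(builder.evaluate("What is supervised fine-tuning?"))
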
class DiffusionBuilder:
    def __init__(self):
        self.config = None
        self.pipeline = None

    def load_model(self, model_path: str, config: DiffusionConfig):
        try:
            from diffusers import StableDiffusionPipeline
            import torch
            logger.info(f"Loading diffusion model: {model_path}")
            self.pipeline = StableDiffusionPipeline.from_pretrained(model_path)
            self.pipeline.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
            self.config = config
            logger.info("Diffusion model loaded successfully")
        except Exception as e:
            logger.error(f"Error loading diffusion model: {str(e)}")
            raise

    def fine_tune(self, images, texts):
        try:
            import torch
            import numpy as np
            logger.info("Starting diffusion fine-tuning")
            optimizer = torch.optim.AdamW(self.pipeline.unet.parameters(), lr=1e-5)
            self.pipeline.unet.train()
            for _ in range(1):  # single epoch
                for img, text in zip(images, texts):
                    optimizer.zero_grad()
                    # Images are assumed RGB with sides divisible by 8 (e.g. 512x512);
                    # scale pixels to [-1, 1], the range the SD VAE expects.
                    img_tensor = torch.tensor(np.array(img)).permute(2, 0, 1).unsqueeze(0).float().to(self.pipeline.device) / 255.0
                    img_tensor = img_tensor * 2.0 - 1.0
                    latents = self.pipeline.vae.encode(img_tensor).latent_dist.sample() * self.pipeline.vae.config.scaling_factor
                    noise = torch.randn_like(latents)
                    timesteps = torch.randint(0, self.pipeline.scheduler.config.num_train_timesteps, (1,), device=latents.device)
                    noisy_latents = self.pipeline.scheduler.add_noise(latents, noise, timesteps)
                    text_emb = self.pipeline.text_encoder(self.pipeline.tokenizer(text, return_tensors="pt").input_ids.to(self.pipeline.device))[0]
                    pred_noise = self.pipeline.unet(noisy_latents, timesteps, encoder_hidden_states=text_emb).sample
                    # Standard epsilon-prediction objective: recover the injected noise.
                    loss = torch.nn.functional.mse_loss(pred_noise, noise)
                    loss.backward()
                    optimizer.step()
            logger.info("Diffusion fine-tuning completed")
        except Exception as e:
            logger.error(f"Error in diffusion fine-tuning: {str(e)}")
            raise

    def generate(self, prompt: str):
        try:
            logger.info(f"Generating image with prompt: {prompt}")
            img = self.pipeline(prompt, num_inference_steps=20).images[0]
            logger.info("Image generated successfully")
            return img
        except Exception as e:
            logger.error(f"Error in image generation: {str(e)}")
            raise

# Utilities
def get_download_link(file_path, mime_type="text/plain", label="Download"):
    with open(file_path, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    # Data-URI anchor so the browser downloads the file straight from rendered markdown.
    return f'<a href="data:{mime_type};base64,{b64}" download="{os.path.basename(file_path)}">{label} 📥</a>'

def generate_filename(sequence, ext="png"):
    from datetime import datetime
    import pytz
    central = pytz.timezone('US/Central')
    timestamp = datetime.now(central).strftime("%d%m%Y%H%M%S%p")
    return f"{sequence}{timestamp}.{ext}"

def get_gallery_files(file_types):
    import glob
    return sorted([f for ext in file_types for f in glob.glob(f"*.{ext}")])

def zip_files(files, zip_name):
    with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for file in files:
            zipf.write(file, os.path.basename(file))
    return zip_name

# JavaScript/HTML Dual Camera Component
dual_camera_html = """
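<!--
  Dual camera capture UI, rendered in-app via streamlit.components.v1.html.
  Camera access is assumed to come from navigator.mediaDevices.getUserMedia
  in the markup that follows.
-->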