feat: add flowchart of the image prediction process; embed it in the README and add an overview of multi-model consensus methods
FLOW.gv
ADDED
@@ -0,0 +1,66 @@
+digraph ImagePredictionFlow {
+    graph [fontname="Arial", fontsize="10", rankdir="TB"]; // top-to-bottom layout to reduce width
+    node [shape="rect", style="rounded,filled", fontname="Arial", fontsize="10", fillcolor="lightblue", gradientangle="90"];
+    edge [fontname="Arial", fontsize="8"];
+
+    A [label="User Upload,\nPredict", fillcolor="lightgreen"];
+    B [label="Img Pre-proc,\nAgent Init", fillcolor="lightyellow"];
+
+    subgraph cluster_models {
+        label = "Model Ensemble";
+        style = "dashed";
+
+        ImageIn [shape=point, label="", width=0, height=0]; // invisible fan-out point
+        Model1 [label="Model1", fillcolor="lightcoral"];
+        Model2 [label="Model2", fillcolor="lightcoral"];
+        Model3 [label="Model3", fillcolor="lightcoral"];
+        Model4 [label="Model4", fillcolor="lightcoral"];
+        Model5 [label="Model5", fillcolor="lightcoral"];
+        Model6 [label="Model6", fillcolor="lightcoral"];
+        Model7 [label="Model7", fillcolor="lightcoral"];
+        WeightedConsensusInput [label="Model Results", fillcolor="lightyellow"];
+
+        ImageIn -> Model1; ImageIn -> Model2; ImageIn -> Model3; ImageIn -> Model4; ImageIn -> Model5; ImageIn -> Model6; ImageIn -> Model7;
+        Model1 -> WeightedConsensusInput; Model2 -> WeightedConsensusInput; Model3 -> WeightedConsensusInput; Model4 -> WeightedConsensusInput; Model5 -> WeightedConsensusInput; Model6 -> WeightedConsensusInput; Model7 -> WeightedConsensusInput;
+    }
+
+    ContextualIntelligenceAgent [label="Contextual\nIntelligence Agent", fillcolor="lightcyan"];
+    BaggingAgent [label="Bagging\nAgent", fillcolor="lightcyan"];
+    DeepEnsembleAgent [label="DeepEnsemble\nAgent", fillcolor="lightcyan"];
+    EvolutionEnsembleAgent [label="EvolutionEnsemble\nAgent", fillcolor="lightcyan"];
+
+    WeightManager [label="Weight\nManager", fillcolor="lightcyan"];
+    WeightedConsensus [label="Weighted\nConsensus", fillcolor="lightgreen"];
+    OptimizeAgent [label="Weight\nOpt Agent", fillcolor="lightcyan"];
+
+
+    subgraph cluster_forensics {
+        label = "Forensic Analysis";
+        style = "dashed";
+
+        ForensicIn [shape=point, label="", width=0, height=0]; // invisible fan-out point
+        GradientProcessing [label="Gradient\nProcessing", fillcolor="lightpink"];
+        MinMaxProcessing [label="MinMax\nProcessing", fillcolor="lightpink"];
+        ELAProcessing [label="ELA\nProcessing", fillcolor="lightpink"];
+        BitPlaneExtraction [label="BitPlane\nExtraction", fillcolor="lightpink"];
+        WaveletBasedNoiseAnalysis [label="Wavelet\nNoise Analysis", fillcolor="lightpink"];
+        AnomalyAgent [label="Anomaly\nDetection", fillcolor="lightcyan"];
+
+        ForensicIn -> GradientProcessing; ForensicIn -> MinMaxProcessing; ForensicIn -> ELAProcessing; ForensicIn -> BitPlaneExtraction; ForensicIn -> WaveletBasedNoiseAnalysis;
+        GradientProcessing -> AnomalyAgent; MinMaxProcessing -> AnomalyAgent; ELAProcessing -> AnomalyAgent; BitPlaneExtraction -> AnomalyAgent; WaveletBasedNoiseAnalysis -> AnomalyAgent;
+    }
+
+    DataLoggingAndOutput [label="Data Logging\nOutput", fillcolor="lightsalmon"];
+    ResultsDisplay [label="Results", fillcolor="lightgreen"];
+
+    // Connections
+    A -> B;
+    B -> ImageIn;
+
+    WeightedConsensusInput -> ContextualIntelligenceAgent; WeightedConsensusInput -> BaggingAgent; WeightedConsensusInput -> DeepEnsembleAgent; WeightedConsensusInput -> EvolutionEnsembleAgent; // model results feed the ensemble agents
+    ContextualIntelligenceAgent -> WeightManager; BaggingAgent -> WeightManager; DeepEnsembleAgent -> WeightManager; EvolutionEnsembleAgent -> WeightManager; // agents propose weights to the WeightManager
+    WeightManager -> WeightedConsensus;
+    WeightedConsensus -> OptimizeAgent; OptimizeAgent -> WeightManager; // feedback loop for weight optimization
+    WeightedConsensus -> ForensicIn; AnomalyAgent -> DataLoggingAndOutput;
+    DataLoggingAndOutput -> ResultsDisplay;
+}
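The committed `graph.svg` and `graph_alt.svg` appear to be renders of this file; with Graphviz installed they can be regenerated via `dot -Tsvg FLOW.gv -o graph_alt.svg`. For readers tracing the `WeightManager -> WeightedConsensus` edge, the sketch below shows one plausible reading of that step: a softmax-normalized weighted average over the seven model scores. The function name and weighting scheme are illustrative assumptions, not this repository's actual implementation.

```python
# Illustrative sketch only: the name and softmax weighting are assumptions,
# not the app's real WeightManager / WeightedConsensus code.
import numpy as np

def weighted_consensus(ai_scores: np.ndarray, weights: np.ndarray) -> float:
    """Fuse per-model P(AI-generated) scores into one consensus score.

    ai_scores: shape (7,), each model's probability that the image is AI-made.
    weights:   shape (7,), raw agent-adjusted weights (any real values).
    """
    w = np.exp(weights - weights.max())  # softmax, shifted for numerical stability
    w /= w.sum()                         # normalize weights to sum to 1
    return float(np.dot(w, ai_scores))   # convex combination of model scores

# With equal weights this reduces to a plain average of the seven scores.
scores = np.array([0.91, 0.88, 0.40, 0.95, 0.77, 0.85, 0.90])
print(weighted_consensus(scores, np.ones(7)))  # -> ~0.809
```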
README.md
CHANGED
@@ -219,6 +219,10 @@ When you upload an image for analysis and click the "Predict" button, the follow
 * **Data Type Conversion**: Numerical values (like AI Score, Real Score) are converted to standard Python floats to ensure proper JSON serialization.
 
 ---
+## Flowchart
+
+<img src="graph_alt.svg">
+
 
 ## Roadmap & Features
 
@@ -334,3 +338,22 @@ Here's the updated table with an additional column providing **instructions on h
 
 ---
 
+---
+### **Overview of Multi-Model Consensus Methods in ML**
+| **Method** | **Category** | **Description** | **Key Advantages** | **Key Limitations** | **Weaknesses** | **Strengths** |
+|------------|--------------|-----------------|--------------------|---------------------|----------------|---------------|
+| **Bagging (e.g., Random Forest)** | **Traditional Ensembles** | Trains multiple models on bootstrapped data subsets, aggregating predictions | Reduces overfitting (variance reduction) | Computationally costly for large datasets; models can be correlated | Not robust to adversarial attacks | Simple to implement; robust to noisy data; handles high-dimensional data well |
+| **Boosting (e.g., XGBoost, LightGBM)** | **Traditional Ensembles** | Iteratively corrects errors using weighted models | High accuracy on structured/tabular data | Risk of overfitting; sensitive to noisy data | Computationally intensive | Dominates in competitions (e.g., Kaggle); scalable for medium datasets |
+| **Stacking** | **Traditional Ensembles** | Combines predictions via a meta-learner | Can outperform individual models; flexible | Increased complexity and data-leakage risk | Requires careful hyperparameter tuning | Excels at combining diverse models (e.g., trees + SVMs + linear models) |
+| **Deep Ensembles** | **Deep Learning Ensembles** | Multiple independently trained neural networks | Uncertainty estimation; robust to data shifts | High computational cost; memory-heavy | Model coordination challenges | State of the art in safety-critical domains (e.g., medical imaging, autonomous vehicles) |
+| **Snapshot Ensembles** | **Deep Learning Ensembles** | Saves models at different optimization stages | Efficient (only one training run) | Limited diversity (same architecture/init) | Requires careful checkpoint selection | Lightweight for tasks like on-device deployment |
+| **Monte Carlo Dropout** | **Approximate Ensembles** | Applies dropout at inference to simulate many models | Essentially free ensembling at test time | Approximates uncertainty poorly compared to deep ensembles | Limited diversity | Cheap and simple; useful for quick uncertainty estimates |
+| **Mixture of Experts (MoE)** | **Scalable Ensembles** | Specialized sub-models (experts) with a gating mechanism | Efficient scaling (activates only some experts) | Training instability; uneven expert utilization | Requires expert/gate orchestration | Dominates large-scale applications such as Switch Transformers and hyperscale cloud systems |
+| **Bayesian Neural Networks (BNNs)** | **Probabilistic Ensembles** | Models weights as probability distributions | Built-in uncertainty quantification | Intractable to train exactly; approximations needed | Difficult optimization | Essential for risk-averse applications (robotics, finance) |
+| **Ensemble Knowledge Distillation** | **Model Compression** | Trains a single model to mimic an ensemble | Reduces compute/memory demands | Loses some ensemble benefits (diversity, uncertainty) | Relies on a high-quality teacher ensemble | Enables ensemble-like performance in compact models (edge devices) |
+| **Noisy Student Training** | **Semi-Supervised Ensembles** | Iterative self-training with teacher-student loops | Uses unlabeled data effectively; improves robustness | Needs large unlabeled datasets and heavy compute | Vulnerable to error propagation | State of the art in semi-supervised settings (e.g., NLP) |
+| **Evolutionary Ensembles** | **Dynamic Ensembles** | Uses genetic algorithms to evolve model populations | Adaptive diversity generation | High time/cost for evolution; niche use cases | Hard to interpret | Useful for non-stationary environments and datasets with drift |
+| **Consensus Networks** | **Distributed Ensembles** | Distributes models across clients and aggregates votes | Decentralized, privacy-preserving predictions | Communication overhead; non-i.i.d. data conflicts | Requires synchronized coordination | Feeds into federated learning systems (e.g., healthcare, finance) |
+| **Hybrid Systems** | **Cross-Architecture Ensembles** | Combines models (e.g., CNNs, GNNs, transformers) | Captures multi-modal or heterogeneous patterns | Integration complexity; slower inference | Model conflicts | Dominates tasks requiring domain-specific reasoning (e.g., drug discovery) |
+| **Self-Supervised Ensembles** | **Vision/NLP** | Uses contrastive learning with multiple models (e.g., MoCo, SimCLR) | Data-efficient; strong performance on downstream tasks | Resource-heavy training; requires pre-training at scale | Low interpretability | Foundational for modern vision/NLP architectures; resilient to data scarcity |
+---
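To make the table's first rows concrete, here is a minimal scikit-learn sketch of bagging and stacking. It assumes scikit-learn is available; the synthetic data and model choices are illustrative API usage only, unrelated to this app's ensemble:

```python
# Minimal bagging vs. stacking demo on synthetic data (illustrative only;
# scikit-learn is an assumed dependency, not part of this app).
from sklearn.datasets import make_classification
from sklearn.ensemble import BaggingClassifier, StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=500, n_features=20, random_state=0)

# Bagging: identical trees on bootstrapped subsets, predictions aggregated
bagging = BaggingClassifier(DecisionTreeClassifier(), n_estimators=50, random_state=0)

# Stacking: diverse base models whose predictions feed a meta-learner
stacking = StackingClassifier(
    estimators=[("tree", DecisionTreeClassifier()), ("svm", SVC())],
    final_estimator=LogisticRegression(),
)

for name, model in [("bagging", bagging), ("stacking", stacking)]:
    print(name, cross_val_score(model, X, y, cv=5).mean())
```

Bagging reduces variance by averaging many near-identical learners, while stacking lets a meta-learner weigh heterogeneous ones; these are the same trade-offs the table summarizes.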
graph.svg
ADDED
graph_alt.svg
ADDED