ProCreations committed on
Commit c6b6431 · verified · 1 parent: 8b8e38b

Upload index.html

Files changed (1)
  1. index.html +0 -903
index.html CHANGED
@@ -1,903 +0,0 @@
1
- <!DOCTYPE html>
2
- <html lang="en">
3
- <head>
4
- <meta charset="UTF-8">
5
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
6
- <title>AI Explainer: How Neural Networks Work</title>
7
- <style>
8
- * {
9
- margin: 0;
10
- padding: 0;
11
- box-sizing: border-box;
12
- }
13
-
14
- body {
15
- font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
16
- background: #0a0a0a;
17
- color: #e0e0e0;
18
- line-height: 1.6;
19
- overflow-x: hidden;
20
- }
21
-
22
- .container {
23
- max-width: 1200px;
24
- margin: 0 auto;
25
- padding: 20px;
26
- }
27
-
28
- header {
29
- text-align: center;
30
- padding: 40px 20px;
31
- background: linear-gradient(135deg, #1e3c72 0%, #2a5298 100%);
32
- margin-bottom: 40px;
33
- border-radius: 20px;
34
- }
35
-
36
- h1 {
37
- font-size: clamp(2rem, 5vw, 3rem);
38
- margin-bottom: 10px;
39
- background: linear-gradient(135deg, #fff 0%, #a8dadc 100%);
40
- -webkit-background-clip: text;
41
- -webkit-text-fill-color: transparent;
42
- }
43
-
44
- .mode-toggle {
45
- display: flex;
46
- justify-content: center;
47
- gap: 20px;
48
- margin: 30px 0;
49
- flex-wrap: wrap;
50
- }
51
-
52
- .mode-btn {
53
- padding: 12px 30px;
54
- background: #2a5298;
55
- color: white;
56
- border: none;
57
- border-radius: 50px;
58
- cursor: pointer;
59
- font-size: 16px;
60
- transition: all 0.3s ease;
61
- font-weight: 600;
62
- }
63
-
64
- .mode-btn.active {
65
- background: #4CAF50;
66
- transform: scale(1.05);
67
- }
68
-
69
- .mode-btn:hover {
70
- transform: translateY(-2px);
71
- box-shadow: 0 5px 15px rgba(74, 144, 226, 0.3);
72
- }
73
-
74
- .section {
75
- background: #1a1a1a;
76
- padding: 30px;
77
- margin-bottom: 30px;
78
- border-radius: 20px;
79
- box-shadow: 0 10px 30px rgba(0, 0, 0, 0.5);
80
- }
81
-
82
- .section h2 {
83
- color: #4CAF50;
84
- margin-bottom: 20px;
85
- font-size: clamp(1.5rem, 4vw, 2rem);
86
- }
87
-
88
- .section h3 {
89
- color: #81C784;
90
- margin: 20px 0 10px 0;
91
- font-size: clamp(1.2rem, 3vw, 1.5rem);
92
- }
93
-
94
- .math-content {
95
- background: #0d0d0d;
96
- padding: 20px;
97
- border-radius: 10px;
98
- overflow-x: auto;
99
- margin: 15px 0;
100
- border: 1px solid #333;
101
- }
102
-
103
- .learn-content {
104
- background: #1e3c72;
105
- padding: 20px;
106
- border-radius: 10px;
107
- margin: 15px 0;
108
- line-height: 1.8;
109
- }
110
-
111
- #xor-demo {
112
- background: #0d0d0d;
113
- padding: 20px;
114
- border-radius: 15px;
115
- margin: 20px 0;
116
- }
117
-
118
- #network-canvas {
119
- width: 100%;
120
- max-width: 800px;
121
- height: 400px;
122
- background: #000;
123
- border-radius: 10px;
124
- margin: 20px auto;
125
- display: block;
126
- }
127
-
128
- .controls {
129
- display: flex;
130
- gap: 15px;
131
- justify-content: center;
132
- flex-wrap: wrap;
133
- margin: 20px 0;
134
- }
135
-
136
- .control-btn {
137
- padding: 10px 25px;
138
- background: #4CAF50;
139
- color: white;
140
- border: none;
141
- border-radius: 5px;
142
- cursor: pointer;
143
- font-size: 16px;
144
- transition: all 0.3s ease;
145
- }
146
-
147
- .control-btn:hover {
148
- background: #45a049;
149
- transform: translateY(-2px);
150
- }
151
-
152
- .control-btn:disabled {
153
- background: #666;
154
- cursor: not-allowed;
155
- }
156
-
157
- .stats {
158
- display: grid;
159
- grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
160
- gap: 15px;
161
- margin: 20px 0;
162
- }
163
-
164
- .stat-box {
165
- background: #1a1a1a;
166
- padding: 15px;
167
- border-radius: 10px;
168
- text-align: center;
169
- border: 1px solid #333;
170
- }
171
-
172
- .stat-label {
173
- color: #888;
174
- font-size: 14px;
175
- }
176
-
177
- .stat-value {
178
- color: #4CAF50;
179
- font-size: 24px;
180
- font-weight: bold;
181
- margin-top: 5px;
182
- }
183
-
184
- .loss-chart {
185
- width: 100%;
186
- height: 200px;
187
- background: #000;
188
- border-radius: 10px;
189
- margin: 20px 0;
190
- }
191
-
192
- .formula {
193
- font-family: 'Courier New', monospace;
194
- color: #64B5F6;
195
- padding: 10px;
196
- background: rgba(0, 0, 0, 0.5);
197
- border-radius: 5px;
198
- overflow-x: auto;
199
- white-space: nowrap;
200
- margin: 10px 0;
201
- }
202
-
203
- .highlight {
204
- background: #4CAF50;
205
- color: #000;
206
- padding: 2px 6px;
207
- border-radius: 3px;
208
- font-weight: bold;
209
- }
210
-
211
- @media (max-width: 768px) {
212
- .container {
213
- padding: 10px;
214
- }
215
-
216
- .section {
217
- padding: 20px;
218
- }
219
-
220
- #network-canvas {
221
- height: 300px;
222
- }
223
-
224
- .controls {
225
- gap: 10px;
226
- }
227
-
228
- .control-btn {
229
- padding: 8px 20px;
230
- font-size: 14px;
231
- }
232
- }
233
-
234
- .mode-content {
235
- display: none;
236
- }
237
-
238
- .mode-content.active {
239
- display: block;
240
- }
241
-
242
- .animated-number {
243
- transition: all 0.3s ease;
244
- }
245
-
246
- @keyframes pulse {
247
- 0% { transform: scale(1); }
248
- 50% { transform: scale(1.1); }
249
- 100% { transform: scale(1); }
250
- }
251
-
252
- .pulse {
253
- animation: pulse 0.5s ease;
254
- }
255
- </style>
256
- </head>
257
- <body>
258
- <div class="container">
259
- <header>
260
- <h1>🧠 How AI Really Works</h1>
261
- <p>An Interactive Journey Inside Neural Networks</p>
262
- </header>
263
-
264
- <div class="mode-toggle">
265
- <button class="mode-btn active" onclick="setMode('learn')">🎓 Learn Mode</button>
266
- <button class="mode-btn" onclick="setMode('math')">🔢 Math Mode</button>
267
- </div>
268
-
269
- <div class="section">
270
- <h2>What is a Neural Network?</h2>
271
-
272
- <div class="mode-content learn-mode active">
273
- <div class="learn-content">
274
- <p>Imagine your brain is made of billions of tiny decision-makers called neurons. Each neuron:</p>
275
- <ul style="margin: 15px 0; padding-left: 30px;">
276
- <li>🎯 Takes in information (inputs)</li>
277
- <li>🤔 Thinks about it (processing)</li>
278
- <li>💡 Makes a decision (output)</li>
279
- </ul>
280
- <p>An AI neural network works the same way! It's like a simplified brain made of math. Let's see it in action!</p>
281
- </div>
282
- </div>
283
-
284
- <div class="mode-content math-mode">
285
- <div class="math-content">
286
- <p>A neural network is a function approximator that transforms inputs through layers of neurons:</p>
287
- <div class="formula">
288
- f(x) = σ(W₃ · σ(W₂ · σ(W₁ · x + b₁) + b₂) + b₃)
289
- </div>
290
- <p>Where:</p>
291
- <ul style="margin: 15px 0; padding-left: 30px;">
292
- <li>x = input vector</li>
293
- <li>Wᵢ = weight matrix for layer i</li>
294
- <li>bᵢ = bias vector for layer i</li>
295
- <li>σ = activation function (e.g., ReLU, sigmoid)</li>
296
- </ul>
297
- </div>
298
- </div>
299
- </div>
300
-
301
- <div class="section">
302
- <h2>🎮 Live XOR Training Demo</h2>
303
- <p>Watch an AI learn the XOR problem in real time! XOR outputs 1 when its inputs differ and 0 when they match.</p>
304
-
305
- <div id="xor-demo">
306
- <canvas id="network-canvas"></canvas>
307
-
308
- <div class="controls">
309
- <button class="control-btn" onclick="startTraining()">▶️ Start Training</button>
310
- <button class="control-btn" onclick="pauseTraining()">⏸️ Pause</button>
311
- <button class="control-btn" onclick="resetNetwork()">🔄 Reset</button>
312
- <button class="control-btn" onclick="stepTraining()">⏭️ Step</button>
313
- </div>
314
-
315
- <div class="stats">
316
- <div class="stat-box">
317
- <div class="stat-label">Epoch</div>
318
- <div class="stat-value animated-number" id="epoch">0</div>
319
- </div>
320
- <div class="stat-box">
321
- <div class="stat-label">Loss</div>
322
- <div class="stat-value animated-number" id="loss">1.000</div>
323
- </div>
324
- <div class="stat-box">
325
- <div class="stat-label">Accuracy</div>
326
- <div class="stat-value animated-number" id="accuracy">0%</div>
327
- </div>
328
- <div class="stat-box">
329
- <div class="stat-label">Learning Rate</div>
330
- <div class="stat-value" id="learning-rate">0.1</div>
331
- </div>
332
- </div>
333
-
334
- <canvas id="loss-chart" class="loss-chart"></canvas>
335
- </div>
336
- </div>
337
-
338
- <div class="section">
339
- <h2>How Does Learning Work?</h2>
340
-
341
- <div class="mode-content learn-mode active">
342
- <h3>🎯 Forward Pass: Making Predictions</h3>
343
- <div class="learn-content">
344
- <p>The network makes a prediction by passing data forward through each layer:</p>
345
- <ol style="margin: 15px 0; padding-left: 30px;">
346
- <li><span class="highlight">Input</span>: Feed in the data (like 0,1 for XOR)</li>
347
- <li><span class="highlight">Multiply & Add</span>: Each connection has a "strength" (weight)</li>
348
- <li><span class="highlight">Activate</span>: Decide if the neuron should "fire"</li>
349
- <li><span class="highlight">Output</span>: Get the final prediction</li>
350
- </ol>
351
- </div>
352
-
353
- <h3>📉 Backward Pass: Learning from Mistakes</h3>
354
- <div class="learn-content">
355
- <p>When the network is wrong, it learns by adjusting its connections:</p>
356
- <ol style="margin: 15px 0; padding-left: 30px;">
357
- <li><span class="highlight">Calculate Error</span>: How wrong was the prediction?</li>
358
- <li><span class="highlight">Blame Game</span>: Which connections caused the error?</li>
359
- <li><span class="highlight">Adjust Weights</span>: Make connections stronger or weaker</li>
360
- <li><span class="highlight">Repeat</span>: Try again with new weights!</li>
361
- </ol>
362
- </div>
363
- </div>
364
-
365
- <div class="mode-content math-mode">
366
- <h3>Forward Propagation</h3>
367
- <div class="math-content">
368
- <p>For each layer l:</p>
369
- <div class="formula">
370
- z[l] = W[l] · a[l-1] + b[l]
371
- </div>
372
- <div class="formula">
373
- a[l] = σ(z[l])
374
- </div>
375
- <p>Where a[0] = x (input) and a[L] = ŷ (output)</p>
376
- </div>
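- <!-- Worked example (illustrative numbers): a single neuron with weights [2, -1],
- bias 0.5 and inputs [1, 3] computes z = 2·1 + (-1)·3 + 0.5 = -0.5, so a = σ(-0.5) ≈ 0.38. -->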
377
-
378
- <h3>Backpropagation</h3>
379
- <div class="math-content">
380
- <p>Loss function (Mean Squared Error):</p>
381
- <div class="formula">
382
- L = ½ Σ(y - ŷ)²
383
- </div>
384
- <p>Gradient computation:</p>
385
- <div class="formula">
386
- δ[L] = ∇ₐL ⊙ σ'(z[L])
387
- </div>
388
- <div class="formula">
389
- δ[l] = (W[l+1]ᵀ · δ[l+1]) ⊙ σ'(z[l])
390
- </div>
391
- <p>Weight update:</p>
392
- <div class="formula">
393
- W[l] = W[l] - α · δ[l] · a[l-1]ᵀ
394
- </div>
395
- <div class="formula">
396
- b[l] = b[l] - α · δ[l]
397
- </div>
398
- </div>
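- <!-- Worked example (illustrative numbers): if the output neuron gives ŷ = 0.8 while the
- target is y = 1, then σ'(z[L]) = 0.8 · (1 - 0.8) = 0.16 and δ[L] = (0.8 - 1) · 0.16 = -0.032. -->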
399
- </div>
400
- </div>
401
-
402
- <div class="section">
403
- <h2>Key Components Explained</h2>
404
-
405
- <div class="mode-content learn-mode active">
406
- <h3>🔗 Weights & Biases</h3>
407
- <div class="learn-content">
408
- <p><span class="highlight">Weights</span> are like volume knobs - they control how much each input matters.</p>
409
- <p><span class="highlight">Biases</span> are like thresholds - they decide when a neuron should activate.</p>
410
- </div>
411
-
412
- <h3>⚡ Activation Functions</h3>
413
- <div class="learn-content">
414
- <p>These decide if a neuron should "fire" or not:</p>
415
- <ul style="margin: 15px 0; padding-left: 30px;">
416
- <li><span class="highlight">ReLU</span>: If positive, pass it on. If negative, block it!</li>
417
- <li><span class="highlight">Sigmoid</span>: Squash everything between 0 and 1</li>
418
- <li><span class="highlight">Tanh</span>: Squash everything between -1 and 1</li>
419
- </ul>
420
- </div>
421
-
422
- <h3>🎯 Gradient Descent</h3>
423
- <div class="learn-content">
424
- <p>Imagine you're blindfolded on a hill, trying to reach the bottom:</p>
425
- <ol style="margin: 15px 0; padding-left: 30px;">
426
- <li>Feel the slope around you (calculate gradient)</li>
427
- <li>Take a small step downhill (adjust weights)</li>
428
- <li>Repeat until you reach the bottom (minimum loss)</li>
429
- </ol>
430
- </div>
431
- </div>
432
-
433
- <div class="mode-content math-mode">
434
- <h3>Activation Functions</h3>
435
- <div class="math-content">
436
- <p><strong>ReLU:</strong></p>
437
- <div class="formula">
438
- f(x) = max(0, x)
439
- </div>
440
- <div class="formula">
441
- f'(x) = {1 if x > 0, 0 if x ≤ 0}
442
- </div>
443
-
444
- <p><strong>Sigmoid:</strong></p>
445
- <div class="formula">
446
- σ(x) = 1 / (1 + e⁻ˣ)
447
- </div>
448
- <div class="formula">
449
- σ'(x) = σ(x) · (1 - σ(x))
450
- </div>
451
-
452
- <p><strong>Tanh:</strong></p>
453
- <div class="formula">
454
- tanh(x) = (eˣ - e⁻ˣ) / (eˣ + e⁻ˣ)
455
- </div>
456
- <div class="formula">
457
- tanh'(x) = 1 - tanh²(x)
458
- </div>
459
- </div>
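- <!-- Quick sanity checks: σ(0) = 0.5 and σ'(0) = 0.25; tanh(0) = 0 and tanh'(0) = 1;
- ReLU passes 1.5 through unchanged but maps -2 to 0. -->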
460
-
461
- <h3>Gradient Descent Update Rule</h3>
462
- <div class="math-content">
463
- <div class="formula">
464
- θₜ₊₁ = θₜ - α · ∇θ L(θₜ)
465
- </div>
466
- <p>Where:</p>
467
- <ul style="margin: 15px 0; padding-left: 30px;">
468
- <li>θ = parameters (weights and biases)</li>
469
- <li>α = learning rate</li>
470
- <li>∇θ L = gradient of loss with respect to parameters</li>
471
- </ul>
472
- </div>
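- <!-- Worked example (illustrative numbers): with α = 0.1, a weight of 0.50 whose gradient
- is 0.20 updates to 0.50 - 0.1 · 0.20 = 0.48. -->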
473
- </div>
474
- </div>
475
- </div>
476
-
477
- <script>
478
- // Global variables
479
- let mode = 'learn';
480
- let network = null;
481
- let training = false;
482
- let epoch = 0;
483
- let lossHistory = [];
484
- const canvas = document.getElementById('network-canvas');
485
- const ctx = canvas.getContext('2d');
486
- const lossCanvas = document.getElementById('loss-chart');
487
- const lossCtx = lossCanvas.getContext('2d');
488
-
489
- // Set canvas sizes
490
- function resizeCanvases() {
491
- canvas.width = canvas.offsetWidth;
492
- canvas.height = canvas.offsetHeight;
493
- lossCanvas.width = lossCanvas.offsetWidth;
494
- lossCanvas.height = lossCanvas.offsetHeight;
495
- }
496
- resizeCanvases();
497
- window.addEventListener('resize', resizeCanvases);
498
-
499
- // Mode switching
500
- function setMode(newMode) {
501
- mode = newMode;
502
- document.querySelectorAll('.mode-btn').forEach(btn => {
503
- btn.classList.toggle('active', btn.textContent.toLowerCase().includes(newMode));
504
- });
505
- document.querySelectorAll('.mode-content').forEach(content => {
506
- content.classList.toggle('active', content.classList.contains(`${newMode}-mode`));
507
- });
508
- }
509
-
510
- // Neural Network Class
511
- class NeuralNetwork {
512
- constructor() {
513
- // Network architecture: 2-25-25-1 (about 750 parameters)
514
- this.layers = [2, 25, 25, 1];
515
- this.weights = [];
516
- this.biases = [];
517
- this.activations = [];
518
- this.zValues = [];
519
- this.gradients = [];
520
- this.learningRate = 0.1;
521
-
522
- this.initializeNetwork();
523
- }
524
-
525
- initializeNetwork() {
526
- // He-style initialization: weights scaled by √(2 / fan-in), suited to the ReLU hidden layers
527
- for (let i = 1; i < this.layers.length; i++) {
528
- const rows = this.layers[i];
529
- const cols = this.layers[i-1];
530
- const scale = Math.sqrt(2.0 / cols);
531
-
532
- // Initialize weights
533
- this.weights[i-1] = [];
534
- for (let r = 0; r < rows; r++) {
535
- this.weights[i-1][r] = [];
536
- for (let c = 0; c < cols; c++) {
537
- this.weights[i-1][r][c] = (Math.random() * 2 - 1) * scale;
538
- }
539
- }
540
-
541
- // Initialize biases
542
- this.biases[i-1] = new Array(rows).fill(0);
543
- }
544
- }
545
-
546
- sigmoid(x) {
547
- return 1 / (1 + Math.exp(-x));
548
- }
549
-
550
- sigmoidDerivative(x) {
551
- const s = this.sigmoid(x);
552
- return s * (1 - s);
553
- }
554
-
555
- relu(x) {
556
- return Math.max(0, x);
557
- }
558
-
559
- reluDerivative(x) {
560
- return x > 0 ? 1 : 0;
561
- }
562
-
563
- forward(input) {
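- // Forward pass: for each layer compute z = W·a_prev + b, apply ReLU on the hidden
- // layers and sigmoid on the output, caching z and the activations for backward().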
564
- this.activations = [input];
565
- this.zValues = [];
566
-
567
- for (let i = 0; i < this.weights.length; i++) {
568
- const z = [];
569
- const a = [];
570
-
571
- for (let j = 0; j < this.weights[i].length; j++) {
572
- let sum = this.biases[i][j];
573
- for (let k = 0; k < this.weights[i][j].length; k++) {
574
- sum += this.weights[i][j][k] * this.activations[i][k];
575
- }
576
- z.push(sum);
577
-
578
- // Use ReLU for hidden layers, sigmoid for output
579
- if (i < this.weights.length - 1) {
580
- a.push(this.relu(sum));
581
- } else {
582
- a.push(this.sigmoid(sum));
583
- }
584
- }
585
-
586
- this.zValues.push(z);
587
- this.activations.push(a);
588
- }
589
-
590
- return this.activations[this.activations.length - 1][0];
591
- }
592
-
593
- backward(input, target) {
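- // Backward pass: run forward once, turn the output error into per-layer delta terms,
- // then apply a per-sample gradient-descent update to every weight and bias; returns the squared error.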
594
- const output = this.forward(input);
595
- const error = output - target;
596
-
597
- // Initialize gradients
598
- this.gradients = [];
599
-
600
- // Output layer gradients
601
- let delta = [error * this.sigmoidDerivative(this.zValues[this.zValues.length - 1][0])];
602
- this.gradients.unshift(delta);
603
-
604
- // Hidden layer gradients
605
- for (let i = this.weights.length - 2; i >= 0; i--) {
606
- const newDelta = [];
607
- for (let j = 0; j < this.weights[i].length; j++) {
608
- let sum = 0;
609
- for (let k = 0; k < delta.length; k++) {
610
- sum += this.weights[i+1][k][j] * delta[k];
611
- }
612
- const activation = i > 0 ?
613
- this.reluDerivative(this.zValues[i][j]) :
614
- this.reluDerivative(this.zValues[i][j]);
615
- newDelta.push(sum * activation);
616
- }
617
- delta = newDelta;
618
- this.gradients.unshift(delta);
619
- }
620
-
621
- // Update weights and biases
622
- for (let i = 0; i < this.weights.length; i++) {
623
- for (let j = 0; j < this.weights[i].length; j++) {
624
- for (let k = 0; k < this.weights[i][j].length; k++) {
625
- this.weights[i][j][k] -= this.learningRate * this.gradients[i][j] * this.activations[i][k];
626
- }
627
- this.biases[i][j] -= this.learningRate * this.gradients[i][j];
628
- }
629
- }
630
-
631
- return error * error;
632
- }
633
-
634
- train(inputs, targets) {
635
- let totalLoss = 0;
636
- for (let i = 0; i < inputs.length; i++) {
637
- totalLoss += this.backward(inputs[i], targets[i]);
638
- }
639
- return totalLoss / inputs.length;
640
- }
641
-
642
- predict(input) {
643
- return this.forward(input);
644
- }
645
- }
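- // Rough usage sketch (illustrative epoch count, not part of the page logic):
- //   const nn = new NeuralNetwork();
- //   nn.predict([0, 1]);                               // untrained output, essentially arbitrary
- //   for (let e = 0; e < 2000; e++) nn.train(xorInputs, xorTargets);
- //   nn.predict([0, 1]);                               // now close to 1 for most random initializations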
646
-
647
- // XOR training data
648
- const xorInputs = [[0, 0], [0, 1], [1, 0], [1, 1]];
649
- const xorTargets = [0, 1, 1, 0];
650
-
651
- // Initialize network
652
- function resetNetwork() {
653
- network = new NeuralNetwork();
654
- epoch = 0;
655
- lossHistory = [];
656
- training = false;
657
- updateStats();
658
- drawNetwork();
659
- drawLossChart();
660
- }
661
-
662
- // Training functions
663
- function startTraining() {
664
- if (training) return; // already running; don't start a second animation loop
- training = true;
- trainLoop();
666
- }
667
-
668
- function pauseTraining() {
669
- training = false;
670
- }
671
-
672
- function stepTraining() {
673
- if (!network) resetNetwork();
674
- trainStep();
675
- }
676
-
677
- function trainStep() {
678
- const loss = network.train(xorInputs, xorTargets);
679
- epoch++;
680
- lossHistory.push(loss);
681
- if (lossHistory.length > 100) lossHistory.shift();
682
-
683
- updateStats();
684
- drawNetwork();
685
- drawLossChart();
686
- }
687
-
688
- function trainLoop() {
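- // One epoch per animation frame (roughly 60 per second on a typical display);
- // stops after 1000 epochs or once the loss drops below 0.001.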
689
- if (!training) return;
690
-
691
- trainStep();
692
-
693
- if (epoch < 1000 && lossHistory[lossHistory.length - 1] > 0.001) {
694
- requestAnimationFrame(trainLoop);
695
- } else {
696
- training = false;
697
- }
698
- }
699
-
700
- // Update statistics
701
- function updateStats() {
702
- document.getElementById('epoch').textContent = epoch;
703
-
704
- const loss = lossHistory.length > 0 ? lossHistory[lossHistory.length - 1] : 1;
705
- document.getElementById('loss').textContent = loss.toFixed(4);
706
-
707
- // Calculate accuracy
708
- let correct = 0;
709
- for (let i = 0; i < xorInputs.length; i++) {
710
- const prediction = network ? network.predict(xorInputs[i]) : 0.5;
711
- const rounded = Math.round(prediction);
712
- if (rounded === xorTargets[i]) correct++;
713
- }
714
- const accuracy = (correct / xorInputs.length * 100).toFixed(0);
715
- document.getElementById('accuracy').textContent = accuracy + '%';
716
-
717
- // Add pulse animation on high accuracy
718
- if (correct === xorInputs.length) {
719
- document.getElementById('accuracy').parentElement.classList.add('pulse');
720
- setTimeout(() => {
721
- document.getElementById('accuracy').parentElement.classList.remove('pulse');
722
- }, 500);
723
- }
724
- }
725
-
726
- // Visualization functions
727
- function drawNetwork() {
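- // Positive weights are drawn as green connections, negative weights as red;
- // line width and opacity grow with |weight|, and neuron brightness tracks its activation.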
728
- ctx.clearRect(0, 0, canvas.width, canvas.height);
729
-
730
- if (!network) return;
731
-
732
- const layerSpacing = canvas.width / (network.layers.length + 1);
733
- const neurons = [];
734
-
735
- // Calculate neuron positions
736
- for (let i = 0; i < network.layers.length; i++) {
737
- neurons[i] = [];
738
- const layerSize = network.layers[i];
739
- const ySpacing = canvas.height / (layerSize + 1);
740
-
741
- for (let j = 0; j < layerSize; j++) {
742
- const x = layerSpacing * (i + 1);
743
- const y = ySpacing * (j + 1);
744
- neurons[i].push({ x, y });
745
- }
746
- }
747
-
748
- // Draw connections
749
- for (let i = 0; i < network.weights.length; i++) {
750
- for (let j = 0; j < network.weights[i].length; j++) {
751
- for (let k = 0; k < network.weights[i][j].length; k++) {
752
- const weight = network.weights[i][j][k];
753
- const opacity = Math.min(Math.abs(weight) / 2, 1);
754
-
755
- ctx.beginPath();
756
- ctx.moveTo(neurons[i][k].x, neurons[i][k].y);
757
- ctx.lineTo(neurons[i+1][j].x, neurons[i+1][j].y);
758
-
759
- if (weight > 0) {
760
- ctx.strokeStyle = `rgba(76, 175, 80, ${opacity})`;
761
- } else {
762
- ctx.strokeStyle = `rgba(244, 67, 54, ${opacity})`;
763
- }
764
-
765
- ctx.lineWidth = Math.abs(weight) * 2;
766
- ctx.stroke();
767
- }
768
- }
769
- }
770
-
771
- // Draw neurons
772
- for (let i = 0; i < neurons.length; i++) {
773
- for (let j = 0; j < neurons[i].length; j++) {
774
- const neuron = neurons[i][j];
775
-
776
- // Get activation value
777
- let activation = 0;
778
- if (network.activations[i] && network.activations[i][j] !== undefined) {
779
- activation = network.activations[i][j];
780
- }
781
-
782
- const intensity = Math.min(activation * 255, 255);
783
-
784
- ctx.beginPath();
785
- ctx.arc(neuron.x, neuron.y, 15, 0, Math.PI * 2);
786
- ctx.fillStyle = `rgb(${intensity}, ${intensity}, ${255})`;
787
- ctx.fill();
788
- ctx.strokeStyle = '#4CAF50';
789
- ctx.lineWidth = 2;
790
- ctx.stroke();
791
-
792
- // Draw activation value for visible neurons
793
- if (network.layers[i] <= 5 || i === 0 || i === network.layers.length - 1) {
794
- ctx.fillStyle = '#fff';
795
- ctx.font = '10px Arial';
796
- ctx.textAlign = 'center';
797
- ctx.textBaseline = 'middle';
798
- ctx.fillText(activation.toFixed(2), neuron.x, neuron.y);
799
- }
800
- }
801
- }
802
-
803
- // Draw layer labels
804
- ctx.fillStyle = '#888';
805
- ctx.font = '14px Arial';
806
- ctx.textAlign = 'center';
807
-
808
- const labels = ['Input', 'Hidden 1', 'Hidden 2', 'Output'];
809
- for (let i = 0; i < network.layers.length; i++) {
810
- const x = layerSpacing * (i + 1);
811
- ctx.fillText(labels[i], x, 30);
812
- ctx.fillText(`(${network.layers[i]} neurons)`, x, 45);
813
- }
814
-
815
- // Draw XOR truth table
816
- ctx.fillStyle = '#4CAF50';
817
- ctx.font = '12px Arial';
818
- ctx.textAlign = 'left';
819
- ctx.fillText('XOR Truth Table:', 20, canvas.height - 80);
820
- ctx.fillStyle = '#888';
821
- ctx.fillText('0 XOR 0 = 0', 20, canvas.height - 60);
822
- ctx.fillText('0 XOR 1 = 1', 20, canvas.height - 45);
823
- ctx.fillText('1 XOR 0 = 1', 20, canvas.height - 30);
824
- ctx.fillText('1 XOR 1 = 0', 20, canvas.height - 15);
825
-
826
- // Show current predictions
827
- if (network) {
828
- ctx.fillStyle = '#4CAF50';
829
- ctx.fillText('Network Output:', 150, canvas.height - 80);
830
- ctx.fillStyle = '#888';
831
- for (let i = 0; i < xorInputs.length; i++) {
832
- const prediction = network.predict(xorInputs[i]);
833
- const text = `${xorInputs[i][0]} XOR ${xorInputs[i][1]} = ${prediction.toFixed(3)}`;
834
- ctx.fillText(text, 150, canvas.height - 60 + i * 15);
835
- }
836
- }
837
- }
838
-
839
- function drawLossChart() {
840
- lossCtx.clearRect(0, 0, lossCanvas.width, lossCanvas.height);
841
-
842
- if (lossHistory.length < 2) return;
843
-
844
- // Find min and max for scaling
845
- const maxLoss = Math.max(...lossHistory, 0.5);
846
- const minLoss = 0;
847
-
848
- // Draw axes
849
- lossCtx.strokeStyle = '#444';
850
- lossCtx.lineWidth = 1;
851
- lossCtx.beginPath();
852
- lossCtx.moveTo(40, 10);
853
- lossCtx.lineTo(40, lossCanvas.height - 30);
854
- lossCtx.lineTo(lossCanvas.width - 10, lossCanvas.height - 30);
855
- lossCtx.stroke();
856
-
857
- // Draw labels
858
- lossCtx.fillStyle = '#888';
859
- lossCtx.font = '12px Arial';
860
- lossCtx.textAlign = 'right';
861
- lossCtx.fillText(maxLoss.toFixed(3), 35, 15);
862
- lossCtx.fillText('0', 35, lossCanvas.height - 30);
863
- lossCtx.textAlign = 'center';
864
- lossCtx.fillText('Loss over Time', lossCanvas.width / 2, lossCanvas.height - 10);
865
-
866
- // Draw loss curve
867
- lossCtx.strokeStyle = '#4CAF50';
868
- lossCtx.lineWidth = 2;
869
- lossCtx.beginPath();
870
-
871
- const xStep = (lossCanvas.width - 50) / (lossHistory.length - 1);
872
- const yScale = (lossCanvas.height - 50) / (maxLoss - minLoss);
873
-
874
- for (let i = 0; i < lossHistory.length; i++) {
875
- const x = 40 + i * xStep;
876
- const y = lossCanvas.height - 30 - (lossHistory[i] - minLoss) * yScale;
877
-
878
- if (i === 0) {
879
- lossCtx.moveTo(x, y);
880
- } else {
881
- lossCtx.lineTo(x, y);
882
- }
883
- }
884
-
885
- lossCtx.stroke();
886
-
887
- // Draw current loss point
888
- if (lossHistory.length > 0) {
889
- const lastX = 40 + (lossHistory.length - 1) * xStep;
890
- const lastY = lossCanvas.height - 30 - (lossHistory[lossHistory.length - 1] - minLoss) * yScale;
891
-
892
- lossCtx.beginPath();
893
- lossCtx.arc(lastX, lastY, 4, 0, Math.PI * 2);
894
- lossCtx.fillStyle = '#4CAF50';
895
- lossCtx.fill();
896
- }
897
- }
898
-
899
- // Initialize
900
- resetNetwork();
901
- </script>
902
- </body>
903
- </html>