ysn-rfd commited on
Commit
835424e
·
verified ·
1 Parent(s): 300af14

Upload 26 files

Browse files
BIN_BUCKET/__pycache__/engine.cpython-310.pyc ADDED
Binary file (709 Bytes). View file
 
BIN_BUCKET/__pycache__/engine_extra.cpython-310.pyc ADDED
Binary file (556 Bytes). View file
 
BIN_BUCKET/app_main.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Driver script: combine layer-size products from the engine and engine_extra toy modules."""
from engine import *
from engine_extra import *
import numpy as np  # NOTE(review): only used by the commented-out experiments below

# Instantiate the two engines.
nnr = nn()
nnc = extra_engine()
mmc1 = nnr.nnp(2,2,2,2)       # engine.nnp: x*y*z*n_layer -> 16
mmc2 = nnc.ext_engine(3,3,3)  # engine_extra: n**fwd + bcw + fwd*bcw -> 39
mult_mmc = mmc1*mmc2
#---------------------------------
sigma_1 = nnr.nnp(2,2,2,2)
sigma_2 = nnc.ext_engine(2,2,2)
sigma_nc = sigma_1**sigma_2

#---------------------------------
#sigma_opt = sigma_1/sigma_2*sigma_nc**sigma_1+sigma_1+sigma_2*np.pi
#sigma_opt_2 = np.array([sigma_opt])*np.pi/sigma_opt
#show = nnr.nnp(2,2,2,2)
#print('dim:', show)
#print('mult mmc:', mult_mmc, 'mmc1:', mmc1, 'mmc2:', mmc2)
#---------------------------------

print('sigma nc:', sigma_nc)
BIN_BUCKET/engine.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
ENGINE
=====

Neural Network NN or (nn) engine, tensor calc neural network

Engine NN
====
"""


class nn():
    """Toy tensor-size calculator.

    Computes the total element count of ``n_layer`` layers of shape
    ``x * y * z``, treating any zero dimension as 1 so the product never
    collapses to zero.
    """

    def nnp(self, x, y, z, n_layer):
        """Return ``x * y * z * n_layer`` with zero arguments clamped to 1.

        Args:
            x, y, z: dimensions of one layer.
            n_layer: number of layers; a value of 0 prints a warning and
                is treated as 1 (original behavior preserved).

        Returns:
            The clamped product ``x * y * z * n_layer``.
        """
        # The original body had redundant self-assignments (`x = x`) and
        # empty `else: pass` branches; both removed — behavior unchanged.
        if x == 0:
            x = 1
        if y == 0:
            y = 1
        if z == 0:
            z = 1
        if n_layer == 0:
            print('L_ERR, Set To 1')  # keep the original warning side effect
            n_layer = 1

        dim = x * y * z          # element count of a single layer
        g_layer = dim * n_layer  # total across all layers
        return g_layer
BIN_BUCKET/engine_extra.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
class extra_engine():
    """Companion calculator to engine.nn: mixes forward/backward layer counts."""

    def ext_engine(self, n_layer, fwd_layer, bcw_layer):
        """Return ``n_layer ** fwd_layer + bcw_layer + fwd_layer * bcw_layer``.

        Args:
            n_layer: base layer count.
            fwd_layer: exponent and multiplier for the forward term.
            bcw_layer: additive and multiplicative backward term.
        """
        # Redundant `name = name` self-assignments removed; same arithmetic.
        mult = n_layer ** fwd_layer       # stage 0: exponentiation
        sum_stg1 = mult + bcw_layer       # stage 1: add backward count
        sum_stg2 = fwd_layer * bcw_layer  # stage 2: cross term
        fine_sum = sum_stg1 + sum_stg2
        return fine_sum
basic_python_study/class_std_day1_1.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Day-1 class basics: plain attributes, then a __str__ override."""


class Person:
    """Simple record with a name and an age."""

    def __init__(self, name, age):
        self.name, self.age = name, age


user = Person("John", 36)

# Attributes are public; read them directly.
print(user.name)
print(user.age)

#--------------------------------------


class Person2:
    """Like Person, but printable via __str__ as 'name(age)'."""

    def __init__(self, name, age):
        self.name, self.age = name, age

    def __str__(self):
        return f"{self.name}({self.age})"


user2 = Person2("John", 36)

# print() calls str(), which dispatches to __str__.
print(user2)
basic_python_study/class_std_day1_2.py ADDED
File without changes
basic_python_study/socket_day1_1.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Minimal TCP echo server: accept one client and echo bytes until it disconnects."""
import socket

# NOTE(review): the original comment called this a "loopback" address, but
# 192.168.1.1 is a LAN address; use "127.0.0.1" for localhost.
HOST = "192.168.1.1"
PORT = 65432  # Port to listen on (non-privileged ports are > 1023)

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    # BUGFIX: a server must bind() its listening address — the original called
    # s.connect(HOST), which (a) is the client-side call and (b) passed a bare
    # string where socket APIs require an (address, port) tuple, raising a
    # TypeError before listen()/accept() could ever run.
    s.bind((HOST, PORT))
    s.listen()
    conn, addr = s.accept()
    with conn:
        print(f"Connected by {addr}")
        while True:
            data = conn.recv(1024)
            if not data:
                break  # client closed the connection
            conn.sendall(data)  # echo the received bytes back
data.csv ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ text,label
2
+ "I love this product! It's amazing.",positive
3
+ "This is the worst purchase I've ever made.",negative
4
+ "Great service and friendly staff.",positive
5
+ "I'm not satisfied with the quality.",negative
6
+ "Absolutely fantastic! Highly recommend it.",positive
7
+ "Terrible experience. Will not buy again.",negative
8
+ "The best investment I've ever made.",positive
9
+ "Very disappointed. It didn't work as expected.",negative
10
+ "I am extremely happy with my purchase!",positive
11
+ "Not worth the money. Very cheap.",negative
12
+ "Excellent quality! Will buy again.",positive
13
+ "Completely useless. A waste of money.",negative
14
+ "I can't believe how good this is!",positive
15
+ "The delivery was late and customer service was unhelpful.",negative
16
+ "I will tell all my friends about this!",positive
17
+ "The product did not meet my expectations.",negative
18
+ "Fantastic! I will definitely return.",positive
19
+ "Poor quality and bad customer support.",negative
20
+ "I was pleasantly surprised by this product.",positive
21
+ "Not impressed, it broke after a week.",negative
22
+ "Five stars! Exceeded my expectations.",positive
23
+ "I would not recommend this to anyone.",negative
24
+ "Very satisfied with my purchase!",positive
25
+ "This is the best I've ever tried.",positive
26
+ "Such a letdown, I expected more.",negative
27
+ "Perfect for what I needed!",positive
28
+ "Terrible quality, do not buy.",negative
29
+ "I'm very pleased with my experience.",positive
30
+ "Will never buy from this company again.",negative
31
+ "Impressive quality and fast shipping!",positive
32
+ "Not what I expected at all.",negative
33
+ "I'm thrilled with the results!",positive
34
+ "Disappointing performance, very slow.",negative
35
+ "I absolutely love it!",positive
36
+ "Would give zero stars if I could.",negative
37
+ "Great value for the price.",positive
38
+ "Awful, I want my money back.",negative
39
+ "Highly satisfied, I will be a returning customer.",positive
40
+ "Really bad service, they didn't care.",negative
41
+ "Such a great find! I'm so happy.",positive
42
+ "Never again, this was a terrible mistake.",negative
43
+ "Fantastic product for the price!",positive
44
+ "Don't waste your time or money.",negative
45
+ "This is my go-to product now!",positive
46
+ "Extremely disappointed, it broke easily.",negative
47
+ "Best purchase of the year!",positive
48
+ "Extremely poor quality, do not recommend.",negative
49
+ "Superb! Exactly what I was looking for.",positive
50
+ "Very frustrating experience, I'm unhappy.",negative
51
+ "I can't get enough of this!",positive
52
+ "Would not recommend this product.",negative
53
+ "I would definitely buy this again!",positive
54
+ "Completely unsatisfactory.",negative
55
+ "Very high quality, I'm impressed!",positive
56
+ "One of the worst purchases I've made.",negative
57
+ "Excellent product! I use it every day.",positive
58
+ "Not worth the hype.",negative
data.txt ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Once upon a time in a distant land, there was a wise old man who lived in a small village. He had seen many seasons come and go and had gathered wisdom from every experience.
2
+
3
+ One day, a young traveler arrived at the village and asked the old man, "What is the secret to happiness?"
4
+
5
+ The old man smiled and said, "Happiness is like the wind. You cannot catch it, but you can feel it when you open your heart."
6
+
7
+ The traveler pondered these words as he continued his journey.
8
+
9
+ Meanwhile, in another part of the world, a great war was raging. Two kingdoms, locked in battle, sought dominance over the land. But among the soldiers, there was one warrior who questioned the purpose of the war. "Why do we fight?" he asked his commander.
10
+
11
+ "Because power must be won," the commander replied.
12
+
13
+ "But at what cost?" the warrior thought.
14
+
15
+ In a different time and place, a scientist was on the brink of a great discovery. She had spent years studying the stars, hoping to find the answer to humanity’s biggest question: Are we alone?
16
+
17
+ One night, as she gazed through her telescope, she saw something unusual—a faint signal from a distant galaxy. Was it a message? A call from another world? She recorded her findings and prepared to share them with the world.
18
+
19
+ The universe is vast, filled with stories waiting to be told. Every moment, a new tale begins.
20
+
21
+ And so, the story continues...
numpy_study/day1_1.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
"""NumPy day 1: wrap a Python scalar in a 0-d array and display it."""
import numpy as np

# np.array(1) produces a zero-dimensional integer array.
out = np.array(1)

print(out)
pytorch_directml/__pycache__/dataloader_classification.cpython-310.pyc ADDED
Binary file (2.81 kB). View file
 
pytorch_directml/__pycache__/test_classification.cpython-310.pyc ADDED
Binary file (5.73 kB). View file
 
pytorch_directml/dataloader_classification.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch_directml
3
+ from torch import nn
4
+ from torch.utils.data import DataLoader
5
+ from torchvision import datasets
6
+ from torchvision.transforms import ToTensor, Lambda, Compose, transforms
7
+ import torchvision.models as models
8
+ import collections
9
+ import matplotlib.pyplot as plt
10
+ import argparse
11
+ import time
12
+ import os
13
+ import pathlib
14
+
15
def get_pytorch_root(path):
    """Return the repository root (the parent of this file's directory).

    NOTE(review): the ``path`` argument is accepted but never used — the
    result depends only on ``__file__``. Confirm whether callers rely on it.
    """
    return pathlib.Path(__file__).parent.parent.resolve()
17
+
18
+
19
def get_pytorch_data():
    """Return the absolute path of the repo-level ``data`` directory as a string."""
    repo_root = pathlib.Path(__file__).parent.parent.resolve()
    return str(os.path.join(repo_root, 'data'))
21
+
22
+
23
def get_data_path(path):
    """Resolve *path* against the shared data directory.

    Absolute paths are returned untouched; relative ones are joined onto
    ``get_pytorch_data()``.
    """
    if os.path.isabs(path):
        return path
    return str(os.path.join(get_pytorch_data(), path))
28
+
29
+
30
def print_dataloader(dataloader, mode):
    """Print shape and dtype of the first (X, y) batch, labelled with *mode*.

    Prints nothing when the loader is empty; never consumes more than one batch.
    """
    for X, y in dataloader:
        print(f"\t{mode} data X [N, C, H, W]: \n\t\tshape={X.shape}, \n\t\tdtype={X.dtype}")
        print(f"\t{mode} data Y: \n\t\tshape={y.shape}, \n\t\tdtype={y.dtype}")
        break
35
+
36
+
37
def create_training_data_transform(input_size):
    """Build the augmented training pipeline: random resized crop to
    *input_size*, random horizontal flip, tensor conversion, then fixed
    mean/std normalization (the usual ImageNet constants)."""
    return transforms.Compose([transforms.RandomResizedCrop(input_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
42
+
43
+
44
def create_training_dataloader(path, batch_size, input_size=224):
    """Create a shuffled CIFAR-10 training DataLoader rooted at *path*.

    download=False: the dataset must already exist on disk; relative paths
    resolve against the shared data directory via get_data_path().
    """
    path = get_data_path(path)
    print('Loading the training dataset from: {}'.format(path))
    train_transform = create_training_data_transform(input_size)
    training_set = datasets.CIFAR10(root=path, train=True, download=False, transform=train_transform)
    data_loader = DataLoader(dataset=training_set, batch_size=batch_size, shuffle=True, num_workers=0)
    print_dataloader(data_loader, 'Train')  # log the first batch's shape/dtype
    return data_loader
52
+
53
+
54
def create_testing_data_transform(input_size):
    """Build the deterministic evaluation pipeline: resize to 256, center-crop
    to *input_size*, tensor conversion, then fixed mean/std normalization."""
    return transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
61
+
62
+
63
def create_testing_dataloader(path, batch_size, input_size=224):
    """Create an unshuffled CIFAR-10 test DataLoader rooted at *path*.

    download=False: the dataset must already exist on disk.
    """
    path = get_data_path(path)
    print('Loading the testing dataset from: {}'.format(path))
    test_transform = create_testing_data_transform(input_size)
    test_set = datasets.CIFAR10(root=path, train=False, download=False, transform=test_transform)
    data_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=False, num_workers=0)
    print_dataloader(data_loader, 'Test')  # log the first batch's shape/dtype
    return data_loader
pytorch_directml/test_classification.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch_directml
3
+ from torch import nn
4
+ from torch.utils import data
5
+ from torch.utils.data import DataLoader
6
+ from torchvision import datasets
7
+ from torchvision.transforms import ToTensor, Lambda, Compose, transforms
8
+ import torchvision.models as models
9
+ import collections
10
+ import matplotlib.pyplot as plt
11
+ import argparse
12
+ import time
13
+ import os
14
+ import pathlib
15
+ import dataloader_classification
16
+ import torch.autograd.profiler as profiler
17
+ from PIL import Image
18
+ from os.path import exists
19
+
20
def get_checkpoint_folder(model_str, device):
    """Return the checkpoint file path for *model_str* on *device*, creating
    the containing folder if needed."""
    # torch-directml devices report type 'privateuseone'; label them 'dml'.
    device_str = 'dml' if device.type == 'privateuseone' else str(device)
    repo_root = pathlib.Path(__file__).parent.parent.resolve()
    checkpoint_folder = str(os.path.join(repo_root, 'checkpoints', model_str, device_str))
    os.makedirs(checkpoint_folder, exist_ok=True)
    return str(os.path.join(checkpoint_folder, 'checkpoint.pth'))
26
+
27
def eval(dataloader, model_str, model, device, loss, highest_accuracy, save_model, trace):
    """Evaluate *model* over *dataloader* and return the best accuracy so far.

    NOTE(review): the name shadows the builtin ``eval``.

    Args:
        dataloader: test batches of (X, y); must expose ``.dataset``.
        model_str: architecture name, used to locate the checkpoint file.
        model: network to evaluate (already on *device*).
        device: device each batch is moved to.
        loss: criterion mapping (pred, y) -> scalar loss tensor.
        highest_accuracy: best accuracy from previous epochs.
        save_model: if True, save a CPU copy of the weights when accuracy improves.
        trace: if True, profile a single inference batch, print the profiler
            table, and skip metric computation entirely.

    Returns:
        The (possibly updated) highest accuracy; unchanged when *trace* is True.
    """
    size = len(dataloader.dataset)
    num_batches = len(dataloader)

    # Switch model to evaluation mode
    model.eval()

    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X = X.to(device)
            y = y.to(device)

            # Evaluate the model on the test input
            if (trace):
                with profiler.profile(record_shapes=True, with_stack=True, profile_memory=True) as prof:
                    with profiler.record_function("model_inference"):
                        pred = model(X)
                print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=1000))
                break  # profiling only needs one batch
            else:
                pred = model(X)

            # Accumulate loss and correct-prediction counts on the CPU.
            test_loss += loss(pred, y).to("cpu")
            correct += (pred.to("cpu").argmax(1) == y.to("cpu")).type(torch.float).sum()

    if not trace:
        test_loss /= num_batches  # mean loss per batch
        correct /= size           # fraction of correct predictions

        if (correct.item() > highest_accuracy):
            highest_accuracy = correct.item()
            print("current highest_accuracy: ", highest_accuracy)

            # save model (CPU copy of the weights) when accuracy improved
            if save_model:
                state_dict = collections.OrderedDict()
                for key in model.state_dict().keys():
                    state_dict[key] = model.state_dict()[key].to("cpu")
                checkpoint = get_checkpoint_folder(model_str, device)
                torch.save(state_dict, checkpoint)

        print(f"Test Error: \n Accuracy: {(100*correct.item()):>0.1f}%, Avg loss: {test_loss.item():>8f} \n")

    return highest_accuracy
72
+
73
+
74
def get_model(model_str, device):
    """Instantiate the requested torchvision architecture with 10 output
    classes, move it to *device*, and load its saved checkpoint if one exists.

    Raises:
        Exception: if *model_str* names an unsupported architecture.
    """
    # Name -> constructor dispatch table; only the selected constructor is
    # called, exactly mirroring the original if/elif chain.
    factories = {
        'squeezenet1_1': models.squeezenet1_1,
        'resnet50': models.resnet50,
        'squeezenet1_0': models.squeezenet1_0,
        'resnet18': models.resnet18,
        'alexnet': models.alexnet,
        'vgg16': models.vgg16,
        'densenet161': models.densenet161,
        'inception_v3': models.inception_v3,
        'googlenet': models.googlenet,
        'shufflenet_v2_x1_0': models.shufflenet_v2_x1_0,
        'mobilenet_v2': models.mobilenet_v2,
        'mobilenet_v3_large': models.mobilenet_v3_large,
        'mobilenet_v3_small': models.mobilenet_v3_small,
        'resnext50_32x4d': models.resnext50_32x4d,
        'wide_resnet50_2': models.wide_resnet50_2,
        'mnasnet1_0': models.mnasnet1_0,
    }
    if model_str not in factories:
        raise Exception(f"Model {model_str} is not supported yet!")
    model = factories[model_str](num_classes=10).to(device)

    # Resume from a previously saved checkpoint when available.
    checkpoint = get_checkpoint_folder(model_str, device)
    if exists(checkpoint):
        model.load_state_dict(torch.load(checkpoint))

    return model
115
+
116
def preprocess(filename, device, input_size=1):
    """Load an image file and return it as a 1-image batch tensor on *device*.

    NOTE(review): the default ``input_size=1`` produces a 1x1 center crop
    after the Resize(256) step of the testing transform — callers almost
    certainly want a real size (e.g. 224), and ``predict`` below relies on
    this default. Confirm intent before changing.
    """
    input_image = Image.open(filename)
    preprocess_transform = dataloader_classification.create_testing_data_transform(input_size)
    input_tensor = preprocess_transform(input_image)
    input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
    input_batch = input_batch.to(device)
    return input_batch
123
+
124
def predict(filename, model_str, device):
    """Print the top-5 predicted categories for the image in *filename*.

    NOTE(review): the local name ``input`` shadows the builtin.
    NOTE(review): get_model builds networks with num_classes=10 while the
    labels are read from imagenet_classes.txt — these look inconsistent;
    verify which dataset this is meant to serve.
    """
    # Get the model
    model = get_model(model_str, device)
    model.eval()

    # Preprocess input
    input = preprocess(filename, device)

    # Evaluate
    with torch.no_grad():
        pred = model(input).to('cpu')

    # The output has unnormalized scores. To get probabilities, you can run a softmax on it.
    probabilities = torch.nn.functional.softmax(pred[0], dim=0)

    data_folder = dataloader_classification.get_pytorch_data()
    classes_file = str(os.path.join(data_folder, 'imagenet_classes.txt'))
    with open(classes_file, "r") as f:
        categories = [s.strip() for s in f.readlines()]
    # Show top categories per image
    top5_prob, top5_catid = torch.topk(probabilities, 5)
    for i in range(top5_prob.size(0)):
        print(categories[top5_catid[i]], top5_prob[i].item())
147
+
148
+
149
def main(path, batch_size, device, model_str, trace):
    """Evaluate *model_str* on the CIFAR-10 test split.

    Args:
        path: dataset directory (relative paths resolve against the shared data dir).
        batch_size: test batch size; overridden when *trace* is set.
        device: device identifier accepted by torch.device.
        model_str: architecture name understood by get_model().
        trace: if True, profile a single batch instead of a full evaluation.
    """
    if trace:
        # Profiling uses a minimal batch; inception_v3 needs at least 3 samples.
        if model_str == 'inception_v3':
            batch_size = 3
        else:
            batch_size = 1

    # inception_v3 expects 299x299 inputs; everything else here uses 224x224.
    input_size = 299 if model_str == 'inception_v3' else 224

    # Load the dataset
    testing_dataloader = dataloader_classification.create_testing_dataloader(path, batch_size, input_size)

    # Create the device
    device = torch.device(device)

    # Load the model on the device
    start = time.time()

    model = get_model(model_str, device)

    print('Finished moving {} to device: {} in {}s.'.format(model_str, device, time.time() - start))

    cross_entropy_loss = nn.CrossEntropyLoss().to(device)

    # Test (initial best accuracy 0, no checkpoint saving during plain eval)
    highest_accuracy = eval(testing_dataloader,
                    model_str,
                    model,
                    device,
                    cross_entropy_loss,
                    0,
                    False,
                    trace)
182
+
183
+
184
if __name__ == "__main__":
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument("--path", type=str, default="cifar-10-python", help="Path to cifar dataset.")
    parser.add_argument('--batch_size', type=int, default=32, metavar='N', help='Batch size to train with.')
    parser.add_argument('--device', type=str, default='dml', help='The device to use for training.')
    parser.add_argument('--model', type=str, default='resnet18', help='The model to use.')
    # BUGFIX: main() requires a fifth `trace` argument that was never supplied;
    # expose it as a proper flag.
    parser.add_argument('--trace', action='store_true', help='Trace performance of a single batch.')
    args = parser.parse_args()
    device = torch_directml.device(torch_directml.default_device()) if args.device == 'dml' else torch.device(args.device)
    # BUGFIX: args.model is a string — the original `args.model()` raised
    # TypeError before main() could run.
    main(args.path, args.batch_size, device, args.model, args.trace)
pytorch_directml/train_classification.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # Copyright (c) Microsoft Corporation. All rights reserved.
3
+
4
+ import torch
5
+ import torch_directml
6
+ from torch import nn
7
+ from torch.utils.data import DataLoader
8
+ from torchvision import datasets
9
+ from torchvision.transforms import ToTensor, Lambda, Compose, transforms
10
+ import torchvision.models as models
11
+ import collections
12
+ import matplotlib.pyplot as plt
13
+ import argparse
14
+ import time
15
+ import os
16
+ import pathlib
17
+ import test_classification
18
+ import dataloader_classification
19
+ from test_classification import get_model
20
+ import torch.autograd.profiler as profiler
21
+
22
def select_device(device=''):
    """Map a device-name string ('cuda', 'dml', or anything else) to a device.

    Falls back to the CPU when CUDA is requested but unavailable; any name
    other than 'cuda'/'dml' (case-insensitive) also yields the CPU.
    """
    name = device.lower()
    if name == 'cuda':
        if torch.cuda.is_available():
            return torch.device('cuda:0')
        print ("torch.cuda not available")
        return torch.device('cpu')
    if name == 'dml':
        return torch_directml.device(torch_directml.default_device())
    return torch.device('cpu')
33
+
34
def train(dataloader, model, device, loss, learning_rate, momentum, weight_decay, trace, model_str, ci_train):
    """Run one epoch of SGD training over *dataloader*.

    Args:
        dataloader: yields (X, y) batches; must expose ``.dataset``.
        model: network to optimize (already on *device*).
        device: target device each batch is moved to.
        loss: criterion mapping (pred, y) -> scalar loss.
        learning_rate, momentum, weight_decay: SGD hyperparameters.
        trace: if True, profile one batch, print the profiler table, and stop.
        model_str: architecture name; 'inception_v3'/'googlenet' return
            auxiliary outputs that are folded into the loss.
        ci_train: if True, stop after the first batch (smoke-test mode).
    """
    size = len(dataloader.dataset)

    # Define optimizer
    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=learning_rate,
        momentum=momentum,
        weight_decay=weight_decay)

    optimize_after_batches = 1
    start = time.time()
    for batch, (X, y) in enumerate(dataloader):
        X = X.to(device)
        y = y.to(device)

        if (trace):
            with profiler.profile(record_shapes=True, with_stack=True, profile_memory=True) as prof:
                with profiler.record_function("model_inference"):
                    # Compute loss and perform backpropagation
                    if (model_str == 'inception_v3'):
                        pred, _ = model(X)
                    elif (model_str == 'googlenet'):
                        pred, _, _ = model(X)
                    else:
                        pred = model(X)

                    batch_loss = loss(pred, y)
                    batch_loss.backward()

                    if batch % optimize_after_batches == 0:
                        optimizer.step()
                        optimizer.zero_grad()
            print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=1000))
            break  # profiling only needs one batch
        else:
            # Compute loss and perform backpropagation
            if (model_str == 'inception_v3'):
                outputs, aux_outputs = model(X)
                loss1 = loss(outputs, y)
                loss2 = loss(aux_outputs, y)
                batch_loss = loss1 + 0.4*loss2
            elif (model_str == 'googlenet'):
                outputs, aux_outputs_1, aux_outputs_2 = model(X)
                loss1 = loss(outputs, y)
                loss2 = loss(aux_outputs_1, y)
                loss3 = loss(aux_outputs_2, y)
                batch_loss = loss1 + 0.3*loss2 + 0.3*loss3
            else:
                pred = model(X)
                # BUGFIX: reuse the forward pass above — the original computed
                # loss(model(X), y), running a second forward pass per batch.
                batch_loss = loss(pred, y)
            batch_loss.backward()

            if batch % optimize_after_batches == 0:
                optimizer.step()
                optimizer.zero_grad()

            if (batch+1) % 100 == 0:
                batch_loss_cpu, current = batch_loss.to('cpu'), (batch+1) * len(X)
                print(f"loss: {batch_loss_cpu.item():>7f} [{current:>5d}/{size:>5d}] in {time.time() - start:>5f}s")
                start = time.time()

        if ci_train:
            print(f"train [{len(X):>5d}/{size:>5d}] in {time.time() - start:>5f}s")
            break
99
+
100
+
101
def main(path, batch_size, epochs, learning_rate,
        momentum, weight_decay, device, model_str, save_model, trace, ci_train=False):
    """Train *model_str* on CIFAR-10 and evaluate after each epoch.

    Args:
        path: dataset directory.
        batch_size: training/test batch size; overridden when *trace* is set.
        epochs: epoch count; forced to 1 when tracing or in CI mode.
        learning_rate, momentum, weight_decay: SGD hyperparameters.
        device: torch device to train on.
        model_str: architecture name understood by get_model().
        save_model: forwarded to eval(); save weights when accuracy improves.
        trace: profile a single batch instead of real training.
        ci_train: smoke-test mode — one epoch, one batch, no evaluation.
    """
    if trace:
        # Profiling uses a minimal batch; inception_v3 needs at least 3 samples.
        if model_str == 'inception_v3':
            batch_size = 3
        else:
            batch_size = 1
    epochs = 1 if trace or ci_train else epochs

    input_size = 299 if model_str == 'inception_v3' else 224

    model = get_model(model_str, device)

    # Load the dataset
    training_dataloader = dataloader_classification.create_training_dataloader(path, batch_size, input_size)
    testing_dataloader = dataloader_classification.create_testing_dataloader(path, batch_size, input_size)

    # Load the model on the device
    start = time.time()

    # NOTE(review): the timer starts AFTER get_model() already ran, so this
    # always reports ~0s; presumably `start` was meant to precede get_model().
    print('Finished moving {} to device: {} in {}s.'.format(model_str, device, time.time() - start))

    cross_entropy_loss = nn.CrossEntropyLoss().to(device)

    highest_accuracy = 0

    for t in range(epochs):
        print(f"Epoch {t+1}\n-------------------------------")

        # Train
        train(training_dataloader,
            model,
            device,
            cross_entropy_loss,
            learning_rate,
            momentum,
            weight_decay,
            trace,
            model_str,
            ci_train)

        if not trace and not ci_train:
            # Test
            highest_accuracy = test_classification.eval(testing_dataloader,
                                model_str,
                                model,
                                device,
                                cross_entropy_loss,
                                highest_accuracy,
                                save_model,
                                False)

    print("Done! with highest_accuracy: ", highest_accuracy)
154
+
155
+
156
if __name__ == "__main__":
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument("--path", type=str, default="cifar-10-python", help="Path to cifar dataset.")
    parser.add_argument('--batch_size', type=int, default=32, metavar='N', help='Batch size to train with.')
    parser.add_argument('--epochs', type=int, default=50, metavar='N', help='The number of epochs to train for.')
    parser.add_argument('--learning_rate', type=float, default=0.001, metavar='LR', help='The learning rate.')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='The percentage of past parameters to store.')
    parser.add_argument('--weight_decay', default=0.0001, type=float, help='The parameter to decay weights.')
    parser.add_argument('--device', type=str, default='dml', help='The device to use for training.')
    parser.add_argument('--model', type=str, default='', help='The model to use.')
    parser.add_argument('--save_model', action='store_true', help='save model state_dict to file')
    # BUGFIX: argparse's type=bool treats ANY non-empty string (including
    # "False") as True; a store_true flag gives the intended semantics.
    parser.add_argument('--trace', action='store_true', help='Trace performance.')
    args = parser.parse_args()

    print (args)
    device = select_device(args.device)
    main(args.path, args.batch_size, args.epochs, args.learning_rate,
        args.momentum, args.weight_decay, device, args.model, args.save_model, args.trace)
pytorch_study/day1_1.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Autograd warm-up: differentiate y = w*x + b with respect to each leaf tensor."""
import torch as t
import torch.nn as nn
import numpy as np

# Scalar leaf tensors tracked by autograd.
x = t.tensor(1., requires_grad=True)
w = t.tensor(2., requires_grad=True)
b = t.tensor(3., requires_grad=True)

y = w * x + b  # forward pass

y.backward()   # fills .grad: dy/dx = w, dy/dw = x, dy/db = 1

print(x.grad)
print(w.grad)
print(b.grad)
pytorch_study/day1_2.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# One manual step of linear regression: forward, MSE loss, backward, SGD update.
# NOTE: the random draws are unseeded, so results differ on every run; the
# statement ORDER determines which values each randn call yields.
import torch as t
import torch.nn as nn

# Toy batch: 4 samples, 3 input features, 2 targets.
x = t.randn(4,3)
y = t.randn(4,2)

# Fully connected layer (3 -> 2) with randomly initialized weight and bias.
linear = nn.Linear(3,2)

print('w: ', linear.weight)
print('b: ', linear.bias)

# Mean-squared-error criterion and plain SGD over the layer's parameters.
criterion = nn.MSELoss()
optimizer = t.optim.SGD(linear.parameters(), lr=0.01)

pred = linear(x)

loss = criterion(pred, y)
print('loss: ', loss.item())

loss.backward()  # gradients of the loss w.r.t. weight and bias

print('dL/dw: ', linear.weight.grad)
print('dL/db: ', linear.bias.grad)


optimizer.step()  # apply one SGD update

# Re-evaluate on the same batch; the loss should typically decrease.
pred = linear(x)
loss = criterion(pred, y)

print('loss after 1 step optimization', loss.item())
pytorch_study/day2_1.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
"""Element-wise power demo: pow(1) is the identity, so y equals x."""
import torch as t

x = t.tensor([[1,1], [2,2], [3,3], [4,4]])
y = x.pow(1)  # raise each element to the power 1 (values unchanged)

print(y)
pytorch_study/day3_1.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Tiny bag-of-words sentiment classifier trained on a hand-made dataset."""
import pandas as pd
import torch
import numpy
import torch.nn as nn
import torch.optim as optim
from sklearn.feature_extraction.text import CountVectorizer

# 1. Create the dataset and save it.
# BUGFIX: the original list was missing commas after "Not my favorite" and
# "Was very good", so adjacent string literals concatenated into a single
# entry (10 texts vs 12 labels) and pd.DataFrame raised
# "All arrays must be of the same length".
data = {
    "text": [
        "This movie was great",
        "I did not like this movie",
        "The acting was terrible",
        "I loved the plot",
        "It was a boring experience",
        "What a fantastic film!",
        "I hated it",
        "It was okay",
        "Absolutely wonderful!",
        "Not my favorite",
        "Was very good",
        "Very good"
    ],
    "label": [
        1,  # Positive
        0,  # Negative
        0,  # Negative
        1,  # Positive
        0,  # Negative
        1,  # Positive
        0,  # Negative
        0,  # Negative
        1,  # Positive
        0,  # Negative
        1,  # Positive
        1   # Positive (original comment wrongly said "Negative")
    ]
}

# Round-trip through CSV, mirroring a real data pipeline.
# NOTE(review): this overwrites any existing data.csv in the working directory.
df = pd.DataFrame(data)
df.to_csv("data.csv", index=False)

# 2. Read the data back.
df = pd.read_csv("data.csv")

# 3. Tokenize: turn each sentence into a bag-of-words count vector.
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(df["text"]).toarray()
y = df["label"].values

# Convert to tensors; labels become a column vector for BCELoss.
X_tensor = torch.tensor(X, dtype=torch.float32)
y_tensor = torch.tensor(y, dtype=torch.float32).view(-1, 1)

# 4. Two-layer MLP with a sigmoid output for binary sentiment.
class SentimentAnalysisModel(nn.Module):
    def __init__(self, input_size):
        super(SentimentAnalysisModel, self).__init__()
        self.fc1 = nn.Linear(input_size, 8)  # hidden layer with 8 neurons
        self.fc2 = nn.Linear(8, 1)           # single output in (0, 1)
        self.relu = nn.ReLU()                # hidden activation

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))  # squash the score to (0, 1)
        return x

# 5. Loss and optimizer.
input_size = X.shape[1]  # number of features (unique words)
model = SentimentAnalysisModel(input_size)

criterion = nn.BCELoss()                           # binary cross-entropy
optimizer = optim.Adam(model.parameters(), lr=0.01)

# 6. Training loop.
epochs = 100

for epoch in range(epochs):
    # Forward pass.
    y_pred = model(X_tensor)

    # Compute the loss.
    loss = criterion(y_pred, y_tensor)

    # Clear old gradients, backpropagate, and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Report the loss every 10 epochs.
    if (epoch+1) % 10 == 0:
        print(f"Epoch [{epoch+1}/{epochs}], Loss: {loss.item():.4f}")

# 7. Inference helper.
def predict_sentiment(text):
    """Classify *text* as 'Positive' or 'Negative' with the trained model."""
    # Vectorize the input with the SAME fitted vocabulary.
    text_vectorized = vectorizer.transform([text]).toarray()
    text_tensor = torch.tensor(text_vectorized, dtype=torch.float32)

    # Model output is a probability; threshold at 0.5.
    output = model(text_tensor)
    prediction = 1 if output.item() > 0.5 else 0

    return "Positive" if prediction == 1 else "Negative"

# Try a few unseen sentences.
print(predict_sentiment("I really enjoyed this movie!"))
print(predict_sentiment("This was the worst experience ever."))
print(predict_sentiment("It was just okay, nothing special."))
print(predict_sentiment("Absolutely loved the storyline!"))
pytorch_study/day3_2.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import accuracy_score, classification_report

# 1. Load data
data = pd.read_csv('data.csv')  # Replace 'data.csv' with your dataset
print("Columns in the dataset:", data.columns)  # Check column names

# 2. Preprocess data
X = data['text']   # text column
y = data['label']  # label column

# Encode string labels as consecutive integers.
label_encoder = LabelEncoder()
y_encoded = label_encoder.fit_transform(y)

# Split data into training and testing sets.
X_train, X_test, y_train, y_test = train_test_split(
    X, y_encoded, test_size=0.2, random_state=42
)

# Convert text to bag-of-words count features.
vectorizer = CountVectorizer()
X_train_vectorized = vectorizer.fit_transform(X_train)
X_test_vectorized = vectorizer.transform(X_test)


# 3. Define the neural network model
class SentimentModel(nn.Module):
    """Single-hidden-layer classifier over bag-of-words features."""

    def __init__(self, input_size, hidden_size, output_size):
        super(SentimentModel, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)  # raw logits; CrossEntropyLoss applies log-softmax itself
        return x


# Initialize model.
input_size = X_train_vectorized.shape[1]
hidden_size = 512
output_size = len(label_encoder.classes_)
model = SentimentModel(input_size, hidden_size, output_size)

# 4. Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# 5. Train the model
# PERF FIX: the original densified the sparse training matrix and rebuilt both
# tensors inside the loop on every one of the 1000 epochs; do it once here.
X_train_tensor = torch.FloatTensor(X_train_vectorized.toarray())
y_train_tensor = torch.LongTensor(y_train)

num_epochs = 1000
for epoch in range(num_epochs):
    model.train()
    optimizer.zero_grad()
    outputs = model(X_train_tensor)
    loss = criterion(outputs, y_train_tensor)
    loss.backward()
    optimizer.step()
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

# 6. Evaluate the model
model.eval()
with torch.no_grad():
    test_outputs = model(torch.FloatTensor(X_test_vectorized.toarray()))
    _, predicted = torch.max(test_outputs, 1)

# Calculate accuracy.
accuracy = accuracy_score(y_test, predicted.numpy())
print(f'Accuracy: {accuracy:.4f}')

# Detailed classification report.
print(classification_report(y_test, predicted.numpy(), target_names=label_encoder.classes_))


# 7. Test the model with new sample inputs
def predict_sentiment(text):
    """Return the decoded label predicted for a single raw string."""
    # Vectorize the input text with the already-fitted vectorizer.
    text_vectorized = vectorizer.transform([text])
    with torch.no_grad():
        output = model(torch.FloatTensor(text_vectorized.toarray()))
        _, predicted = torch.max(output, 1)
    return label_encoder.inverse_transform(predicted.numpy())[0]


# Test the model with new sentences.
new_samples = [
    "It is very good",
    "Bad",
    "Good",
    "loving you",
    "Loving you",
    "love you",
    "Love you",
    "Very bad",
    "I love you",
    "Fuck",
    "fuck",
    "bad store",
    "i dont love this",
    "not like this",
]

for sample in new_samples:
    sentiment = predict_sentiment(sample)
    print(f'Text: "{sample}" -> Predicted Sentiment: {sentiment}')
pytorch_study/day3_3.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random

# Read the training corpus from file.
with open("data.txt", "r", encoding="utf-8") as f:
    text = f.read()

# Build character <-> index lookup tables.
chars = sorted(list(set(text)))
char_to_idx = {ch: i for i, ch in enumerate(chars)}
idx_to_char = {i: ch for i, ch in enumerate(chars)}

# Encode the text as a list of indices.
data = [char_to_idx[ch] for ch in text]

# Training hyper-parameters.
seq_length = 50  # length of each input sequence
batch_size = 64
hidden_size = 128
num_layers = 2
num_epochs = 100
learning_rate = 0.01


class TextDataset(torch.utils.data.Dataset):
    """Sliding-window dataset: input is data[i:i+L], target is shifted one step."""

    def __init__(self, data, seq_length):
        self.data = data
        self.seq_length = seq_length

    def __len__(self):
        return len(self.data) - self.seq_length

    def __getitem__(self, idx):
        return (
            torch.tensor(self.data[idx:idx + self.seq_length], dtype=torch.long),
            torch.tensor(self.data[idx + 1:idx + self.seq_length + 1], dtype=torch.long),
        )


dataset = TextDataset(data, seq_length)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)


class LSTMModel(nn.Module):
    """Character-level LSTM language model: embedding -> LSTM -> linear head."""

    def __init__(self, vocab_size, hidden_size, num_layers):
        super(LSTMModel, self).__init__()
        self.embedding = nn.Embedding(vocab_size, hidden_size)
        self.lstm = nn.LSTM(hidden_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, x, hidden=None):
        x = self.embedding(x)
        output, hidden = self.lstm(x, hidden)
        output = self.fc(output)
        return output, hidden


vocab_size = len(chars)
model = LSTMModel(vocab_size, hidden_size, num_layers)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

for epoch in range(num_epochs):
    hidden = None     # reset the recurrent state at the start of each epoch
    total_loss = 0.0  # BUG FIX: was read in the epoch summary below but never defined

    for inputs, targets in dataloader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()

        # BUG FIX: the carried-over hidden state keeps the previous batch size;
        # the final (smaller) batch of an epoch would make nn.LSTM raise a
        # size-mismatch error. Reset the state whenever the batch size changes.
        if hidden is not None and hidden[0].size(1) != inputs.size(0):
            hidden = None

        # Forward pass.
        outputs, hidden = model(inputs, hidden)

        # Detach hidden state so gradients do not flow across batch boundaries.
        hidden = (hidden[0].detach(), hidden[1].detach())

        # Compute loss over all time steps.
        loss = criterion(outputs.view(-1, vocab_size), targets.view(-1))

        # Backpropagation.
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        print(f"Epoch {epoch+1}/{num_epochs}, Loss: {loss.item():.4f}")

    # Epoch summary: mean loss over all batches.
    print(f"Epoch {epoch+1}/{num_epochs}, Loss: {total_loss / len(dataloader):.4f}")


def generate_text(model, start_text, length=200):
    """Greedily generate `length` characters continuing `start_text`."""
    model.eval()
    input_seq = torch.tensor(
        [char_to_idx[ch] for ch in start_text], dtype=torch.long
    ).unsqueeze(0).to(device)
    hidden = None
    generated_text = start_text

    for _ in range(length):
        output, hidden = model(input_seq, hidden)
        # Greedy decoding: always pick the most probable next character.
        next_char_idx = torch.argmax(output[:, -1, :]).item()
        generated_text += idx_to_char[next_char_idx]
        # Slide the window: drop the oldest character, append the new one.
        input_seq = torch.cat(
            [input_seq[:, 1:],
             torch.tensor([[next_char_idx]], dtype=torch.long).to(device)],
            dim=1,
        )

    return generated_text


# Sample the model.
start_text = "Once upon a time"
print(generate_text(model, start_text, 200))
pytorch_study/day4_1.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import torch.nn as nn
import torch.nn.functional as F


class simple_sd_model(nn.Module):
    """Tiny feed-forward stack 1 -> 1 -> 6 -> 7 -> 4; ReLU after the first two layers."""

    def __init__(self):
        super(simple_sd_model, self).__init__()
        self.fc1 = nn.Linear(1, 1)
        self.fc2 = nn.Linear(1, 6)
        self.fc3 = nn.Linear(6, 7)
        self.fc4 = nn.Linear(7, 4)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        hidden = self.fc3(hidden)
        return self.fc4(hidden)


# Run a random batch of 10 one-feature samples through the model.
model = simple_sd_model()
input_data = torch.rand(10, 1, requires_grad=True)
output_data = model(input_data)
print(output_data)
pytorch_study/day4_2.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import torch.nn as nn
import torch.optim as optim


# 1. Network definition
class SimpleNN(nn.Module):
    """One hidden layer with ReLU activation; linear output producing class logits."""

    def __init__(self, input_size, hidden_size, output_size):
        super(SimpleNN, self).__init__()
        self.hidden = nn.Linear(input_size, hidden_size)  # hidden layer
        self.activation = nn.ReLU()                       # activation function
        self.output = nn.Linear(hidden_size, output_size)  # output layer

    def forward(self, x):
        return self.output(self.activation(self.hidden(x)))


# 2. Model configuration
input_size = 10   # features per sample
hidden_size = 4   # neurons in the hidden layer
output_size = 4   # number of classes

model = SimpleNN(input_size, hidden_size, output_size)
print(model)

# 3. Loss function and optimizer
criterion = nn.CrossEntropyLoss()  # suited to classification problems
optimizer = optim.Adam(model.parameters(), lr=0.01)

# 4. Synthetic input data: 10 samples, 10 features each
X_train = torch.rand(10, input_size)
y_train = torch.tensor([0, 1, 2, 1, 0, 1, 2, 1, 0, 1])  # class labels

# 5. Train for a single epoch (demonstration only)
for i in range(1):
    optimizer.zero_grad()            # reset gradients
    outputs = model(X_train)         # forward pass
    loss = criterion(outputs, y_train)  # compute the loss
    loss.backward()                  # compute gradients
    optimizer.step()                 # update weights

# 6. Show outputs and loss
print(f"Output:\n{outputs}")
print(f"Loss: {loss.item()}")
pytorch_study/day5_1.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class neural_network(nn.Module):
    """input -> Linear -> ReLU -> Linear -> class logits."""

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(neural_network, self).__init__()
        self.hidden = nn.Linear(input_dim, hidden_dim)
        self.act = nn.ReLU()
        self.output = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        return self.output(self.act(self.hidden(x)))


# Model configuration.
input_dim = 4
hidden_dim = 32
output_dim = 4

model = neural_network(input_dim, hidden_dim, output_dim)
print(model)

# Loss and optimizer (training loop not included in this exercise).
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
simple_gui_find_largfiles/script.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import tkinter as tk
from tkinter import filedialog, messagebox
from pathlib import Path
import os


class LargeFileFinder:
    """Small Tk app: pick a directory and list its files above a size threshold (bytes)."""

    def __init__(self, root):
        self.root = root
        self.root.title("Large File Finder")

        # Threshold input.
        size_label = tk.Label(self.root, text="Select File Size:")
        size_label.pack()
        self.size_entry = tk.Entry(self.root, width=100)
        self.size_entry.pack()

        # Search trigger.
        find_button = tk.Button(
            self.root, text="Find Large Files", command=self.find_large_files
        )
        find_button.pack()

        # Output area.
        self.result_label = tk.Label(
            self.root, text="Results will appear here", wraplength=400
        )
        self.result_label.pack()

    def find_large_files(self):
        """Ask the user for a directory, validate the size entry, and show matches."""
        directory = filedialog.askdirectory()
        if not directory:
            return

        file_size = self.size_entry.get()
        if not file_size:
            messagebox.showwarning("Warning", "Please enter a file size.")
            return

        try:
            size_in_bytes = float(file_size)
        except ValueError:
            messagebox.showwarning("Warning", "Invalid file size format.")
            return

        self.result_label.config(text="Searching for large files...")
        results = self.get_large_files(directory, size_in_bytes)
        if results:
            self.result_label.config(text="Found large files:\n" + results)
        else:
            self.result_label.config(text="No large files found.")

    def get_large_files(self, directory, size):
        """Return a newline-joined list of top-level files in `directory` larger than `size` bytes.

        NOTE(review): only the directory's immediate children are scanned
        (iterdir, not a recursive walk) — confirm this is the intended scope.
        """
        matches = [
            entry.as_posix()
            for entry in Path(directory).iterdir()
            if entry.is_file() and entry.stat().st_size > size
        ]
        return "\n".join(matches)
+
58
+
59
def main():
    """Create the Tk root window, attach the app, and enter the event loop."""
    root = tk.Tk()
    LargeFileFinder(root)
    root.mainloop()


if __name__ == "__main__":
    main()