Image Classification · Transformers · English · art
litav committed on
Commit 14cad36 (verified)
1 Parent(s): 94ce1e2

Update vit_model_test.py

Files changed (1)
  1. vit_model_test.py +12 -7
vit_model_test.py CHANGED
@@ -10,13 +10,14 @@ from sklearn.metrics import accuracy_score, precision_score, confusion_matrix, f
 import matplotlib.pyplot as plt
 import seaborn as sns
 
-# Function to display a video
+# Function to return the HTML for a video
 def display_video(video_url):
-    video_html = f'''
-    <iframe width="560" height="315" src="{video_url}" frameborder="0" allowfullscreen></iframe>
+    return f'''
+    <video width="640" height="480" controls autoplay>
+        <source src="{video_url}" type="video/mp4">
+        Your browser does not support the video tag.
+    </video>
     '''
-    # Place the HTML in your dashboard
-    return video_html
 
 def shuffle_and_split_data(dataframe, test_size=0.2, random_state=59):
     shuffled_df = dataframe.sample(frac=1, random_state=random_state).reset_index(drop=True)
@@ -55,15 +56,19 @@ if __name__ == "__main__":
     predicted_labels = []
 
     # Link to the video
-    video_url = 'https://youtube.com/shorts/vGRq060nPYU?feature=share'  # Replace this with the URL of your video
+    video_url = 'https://www.youtube.com/shorts/vGRq060nPYU?feature=share'  # Replace this with the URL of your video
     video_html = display_video(video_url)
 
     # Show the video before the prediction
-    print(video_html)  # Display the HTML in your dashboard
+    print(video_html)  # This should display the HTML in your dashboard
 
     with torch.no_grad():
         for images, labels in test_loader:
             images, labels = images.to(device), labels.to(device)
+
+            # Show the video during prediction
+            print(video_html)  # Display the video HTML
+
            outputs = model(images)
            logits = outputs.logits  # Extract logits from the output
            _, predicted = torch.max(logits, 1)
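
The updated display_video only returns markup, and print(video_html) writes that markup to stdout rather than rendering it. A minimal sketch of how the returned HTML could actually be rendered, assuming the script runs inside a Jupyter notebook and that video_url points to a directly playable .mp4 file (the URL below is hypothetical; a YouTube Shorts page URL is not itself an mp4 source):

from IPython.display import HTML, display

video_url = "https://example.com/demo_clip.mp4"  # hypothetical direct .mp4 link
video_html = display_video(video_url)            # display_video as defined in vit_model_test.py
display(HTML(video_html))                        # renders the <video> element in the notebook output

In a plain terminal run there is nothing to render, so print(video_html) will only show the raw tags.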
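
The first hunk header shows sklearn metric imports (accuracy_score, precision_score, confusion_matrix, f...), and the second hunk initialises predicted_labels before the torch.no_grad() loop. A minimal sketch of how such a loop is typically aggregated into those metrics; the true_labels list and the .extend(...) calls are assumptions for illustration, and model, device and test_loader are taken from the surrounding script:

import torch
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score

true_labels = []        # assumed companion list for ground-truth labels
predicted_labels = []   # as initialised in the shown diff

with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)              # model forward pass, as in the diff
        logits = outputs.logits              # extract logits from the output
        _, predicted = torch.max(logits, 1)  # index of the highest logit per image
        predicted_labels.extend(predicted.cpu().tolist())
        true_labels.extend(labels.cpu().tolist())

print("accuracy:", accuracy_score(true_labels, predicted_labels))
print("macro F1:", f1_score(true_labels, predicted_labels, average="macro"))
print(confusion_matrix(true_labels, predicted_labels))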