Spaces:
Running
Running
Joshua Lochner
committed on
Commit
·
09cabec
1
Parent(s):
e3d3d3f
Fix conflicting `--no_cuda` argument
Browse files- src/evaluate.py +2 -2
- src/model.py +0 -2
- src/predict.py +5 -6
- src/shared.py +2 -0
- src/train.py +1 -1
src/evaluate.py
CHANGED
@@ -138,7 +138,7 @@ def main():
|
|
138 |
GeneralArguments
|
139 |
))
|
140 |
|
141 |
-
evaluation_args, dataset_args, segmentation_args, classifier_args,
|
142 |
|
143 |
# Load labelled data:
|
144 |
final_path = os.path.join(
|
@@ -150,7 +150,7 @@ def main():
|
|
150 |
return
|
151 |
|
152 |
model, tokenizer = get_model_tokenizer(
|
153 |
-
evaluation_args.model_path, evaluation_args.cache_dir,
|
154 |
|
155 |
with open(final_path) as fp:
|
156 |
final_data = json.load(fp)
|
|
|
138 |
GeneralArguments
|
139 |
))
|
140 |
|
141 |
+
evaluation_args, dataset_args, segmentation_args, classifier_args, general_args = hf_parser.parse_args_into_dataclasses()
|
142 |
|
143 |
# Load labelled data:
|
144 |
final_path = os.path.join(
|
|
|
150 |
return
|
151 |
|
152 |
model, tokenizer = get_model_tokenizer(
|
153 |
+
evaluation_args.model_path, evaluation_args.cache_dir, general_args.no_cuda)
|
154 |
|
155 |
with open(final_path) as fp:
|
156 |
final_data = json.load(fp)
|
src/model.py
CHANGED
@@ -23,8 +23,6 @@ class ModelArguments:
|
|
23 |
'help': 'Path to pretrained model or model identifier from huggingface.co/models'
|
24 |
}
|
25 |
)
|
26 |
-
no_cuda: bool = field(default=False, metadata={
|
27 |
-
'help': 'Do not use CUDA even when it is available'})
|
28 |
|
29 |
# config_name: Optional[str] = field( # TODO remove?
|
30 |
# default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
|
|
|
23 |
'help': 'Path to pretrained model or model identifier from huggingface.co/models'
|
24 |
}
|
25 |
)
|
|
|
|
|
26 |
|
27 |
# config_name: Optional[str] = field( # TODO remove?
|
28 |
# default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
|
src/predict.py
CHANGED
@@ -10,7 +10,7 @@ import logging
|
|
10 |
import os
|
11 |
import itertools
|
12 |
from utils import re_findall
|
13 |
-
from shared import CustomTokens, START_SEGMENT_TEMPLATE, END_SEGMENT_TEMPLATE, OutputArguments, seconds_to_time
|
14 |
from typing import Optional
|
15 |
from segment import (
|
16 |
generate_segments,
|
@@ -115,8 +115,6 @@ class InferenceArguments:
|
|
115 |
output_as_json: bool = field(default=False, metadata={
|
116 |
'help': 'Output evaluations as JSON'})
|
117 |
|
118 |
-
no_cuda: bool = ModelArguments.__dataclass_fields__['no_cuda']
|
119 |
-
|
120 |
def __post_init__(self):
|
121 |
# Try to load model from latest checkpoint
|
122 |
if self.model_path is None:
|
@@ -398,9 +396,10 @@ def main():
|
|
398 |
hf_parser = HfArgumentParser((
|
399 |
PredictArguments,
|
400 |
SegmentationArguments,
|
401 |
-
ClassifierArguments
|
|
|
402 |
))
|
403 |
-
predict_args, segmentation_args, classifier_args = hf_parser.parse_args_into_dataclasses()
|
404 |
|
405 |
if not predict_args.video_ids:
|
406 |
logger.error(
|
@@ -408,7 +407,7 @@ def main():
|
|
408 |
return
|
409 |
|
410 |
model, tokenizer = get_model_tokenizer(
|
411 |
-
predict_args.model_path, predict_args.cache_dir,
|
412 |
|
413 |
for video_id in predict_args.video_ids:
|
414 |
video_id = video_id.strip()
|
|
|
10 |
import os
|
11 |
import itertools
|
12 |
from utils import re_findall
|
13 |
+
from shared import CustomTokens, START_SEGMENT_TEMPLATE, END_SEGMENT_TEMPLATE, GeneralArguments, OutputArguments, seconds_to_time
|
14 |
from typing import Optional
|
15 |
from segment import (
|
16 |
generate_segments,
|
|
|
115 |
output_as_json: bool = field(default=False, metadata={
|
116 |
'help': 'Output evaluations as JSON'})
|
117 |
|
|
|
|
|
118 |
def __post_init__(self):
|
119 |
# Try to load model from latest checkpoint
|
120 |
if self.model_path is None:
|
|
|
396 |
hf_parser = HfArgumentParser((
|
397 |
PredictArguments,
|
398 |
SegmentationArguments,
|
399 |
+
ClassifierArguments,
|
400 |
+
GeneralArguments
|
401 |
))
|
402 |
+
predict_args, segmentation_args, classifier_args, general_args = hf_parser.parse_args_into_dataclasses()
|
403 |
|
404 |
if not predict_args.video_ids:
|
405 |
logger.error(
|
|
|
407 |
return
|
408 |
|
409 |
model, tokenizer = get_model_tokenizer(
|
410 |
+
predict_args.model_path, predict_args.cache_dir, general_args.no_cuda)
|
411 |
|
412 |
for video_id in predict_args.video_ids:
|
413 |
video_id = video_id.strip()
|
src/shared.py
CHANGED
@@ -99,6 +99,8 @@ class GeneralArguments:
|
|
99 |
seed: Optional[int] = field(default_factory=seed_factory, metadata={
|
100 |
'help': 'Set seed for deterministic training and testing. By default, it uses the current time (results in essentially random results).'
|
101 |
})
|
|
|
|
|
102 |
|
103 |
def __post_init__(self):
|
104 |
random.seed(self.seed)
|
|
|
99 |
seed: Optional[int] = field(default_factory=seed_factory, metadata={
|
100 |
'help': 'Set seed for deterministic training and testing. By default, it uses the current time (results in essentially random results).'
|
101 |
})
|
102 |
+
no_cuda: bool = field(default=False, metadata={
|
103 |
+
'help': 'Do not use CUDA even when it is available'})
|
104 |
|
105 |
def __post_init__(self):
|
106 |
random.seed(self.seed)
|
src/train.py
CHANGED
@@ -297,7 +297,7 @@ def main():
|
|
297 |
|
298 |
from model import get_model_tokenizer
|
299 |
model, tokenizer = get_model_tokenizer(
|
300 |
-
model_args.model_name_or_path, model_args.cache_dir,
|
301 |
# max_tokenizer_length = model.config.d_model
|
302 |
|
303 |
# Preprocessing the datasets.
|
|
|
297 |
|
298 |
from model import get_model_tokenizer
|
299 |
model, tokenizer = get_model_tokenizer(
|
300 |
+
model_args.model_name_or_path, model_args.cache_dir, training_args.no_cuda)
|
301 |
# max_tokenizer_length = model.config.d_model
|
302 |
|
303 |
# Preprocessing the datasets.
|