Spaces:
Running
Running
Fixes the progress bar when using the 'verbose=True' argument.
Browse files

PROBLEM:
Previously, this flag resulted in multiple progress bars being
generated. This was due to having a progress bar for both the main
loop and for the encoding step.
SOLUTION:
Removed the progress bar for the encoding step. Also added an argument
to the main progress bar so that a bar actually shows and not just a
timer.
- encoder_models.py +0 -1
- semncg.py +6 -1
encoder_models.py
CHANGED
@@ -64,7 +64,6 @@ class SBertEncoder(Encoder):
|
|
64 |
prediction,
|
65 |
device=self.device,
|
66 |
batch_size=self.batch_size,
|
67 |
-
show_progress_bar=self.verbose,
|
68 |
)
|
69 |
|
70 |
return embeddings
|
|
|
64 |
prediction,
|
65 |
device=self.device,
|
66 |
batch_size=self.batch_size,
|
|
|
67 |
)
|
68 |
|
69 |
return embeddings
|
semncg.py
CHANGED
@@ -441,6 +441,11 @@ class SemNCG(evaluate.Metric):
|
|
441 |
|
442 |
# Validate inputs corresponding to flags
|
443 |
_validate_input_format(tokenize_sentences, predictions, references, documents)
|
|
|
|
|
|
|
|
|
|
|
444 |
|
445 |
# Get GPU
|
446 |
device = get_gpu(gpu)
|
@@ -479,7 +484,7 @@ class SemNCG(evaluate.Metric):
|
|
479 |
iterable_obj = zip(predictions, references, documents)
|
480 |
|
481 |
out = []
|
482 |
-
for idx, (pred, ref, doc) in enumerate(
|
483 |
|
484 |
if not pre_compute_embeddings: # Compute embeddings
|
485 |
ref_sentences = tokenize_and_prep_document(ref, tokenize_sentences)
|
|
|
441 |
|
442 |
# Validate inputs corresponding to flags
|
443 |
_validate_input_format(tokenize_sentences, predictions, references, documents)
|
444 |
+
|
445 |
+
try:
|
446 |
+
N = len(predictions)
|
447 |
+
except Exception as e:
|
448 |
+
N = None
|
449 |
|
450 |
# Get GPU
|
451 |
device = get_gpu(gpu)
|
|
|
484 |
iterable_obj = zip(predictions, references, documents)
|
485 |
|
486 |
out = []
|
487 |
+
for idx, (pred, ref, doc) in tqdm(enumerate(iterable_obj), total=N):
|
488 |
|
489 |
if not pre_compute_embeddings: # Compute embeddings
|
490 |
ref_sentences = tokenize_and_prep_document(ref, tokenize_sentences)
|