Nikita Glazkov committed
Commit 631481e · 1 Parent(s): bfe8c26

test config

Files changed (3)
  1. app.py +1 -2
  2. mars.py +14 -11
  3. tests.py +4 -16
app.py CHANGED

```diff
@@ -1,6 +1,5 @@
 import evaluate
 from evaluate.utils import launch_gradio_widget
 
-
 module = evaluate.load("Glazkov/mars")
-launch_gradio_widget(module)
+launch_gradio_widget(module)
```
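
app.py is the Space's entry point: it loads the metric from the Hub and passes it to launch_gradio_widget, which wraps the module in a small Gradio demo. The same module can be exercised without the UI; a minimal sketch, assuming the repo is published on the Hub as Glazkov/mars:

```python
import evaluate

# Load the metric exactly as app.py does, but skip the Gradio widget
# and call compute() directly instead.
module = evaluate.load("Glazkov/mars")
scores = module.compute(predictions=[1, 0], references=[1, 1])
print(scores)  # {'accuracy': 0.5}, per _compute in mars.py
```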
mars.py CHANGED

```diff
@@ -13,9 +13,8 @@
 # limitations under the License.
 """TODO: Add a description here."""
 
-import evaluate
 import datasets
-
+import evaluate
 
 # TODO: Add BibTeX citation
 _CITATION = """\
@@ -61,7 +60,7 @@ BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
 class MARS(evaluate.Metric):
     """TODO: Short description of my evaluation module."""
 
-    def _info(self):
+    def _info(self) -> evaluate.MetricInfo:
         # TODO: Specifies the evaluate.EvaluationModuleInfo object
         return evaluate.MetricInfo(
             # This is the description that will appear on the modules page.
@@ -70,15 +69,17 @@ class MARS(evaluate.Metric):
             citation=_CITATION,
             inputs_description=_KWARGS_DESCRIPTION,
             # This defines the format of each prediction and reference
-            features=datasets.Features({
-                'predictions': datasets.Value('int64'),
-                'references': datasets.Value('int64'),
-            }),
+            features=datasets.Features(
+                {
+                    "predictions": datasets.Value("int64"),
+                    "references": datasets.Value("int64"),
+                }
+            ),
             # Homepage of the module for documentation
             homepage="http://module.homepage",
             # Additional links to the codebase or references
             codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
-            reference_urls=["http://path.to.reference.url/new_module"]
+            reference_urls=["http://path.to.reference.url/new_module"],
         )
 
     def _download_and_prepare(self, dl_manager):
@@ -86,10 +87,12 @@ class MARS(evaluate.Metric):
         # TODO: Download external resources if needed
         pass
 
-    def _compute(self, predictions, references):
+    def _compute(self, predictions, references) -> dict[str, float]:
         """Returns the scores"""
         # TODO: Compute the different scores of the module
-        accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
+        accuracy = sum(i == j for i, j in zip(predictions, references)) / len(
+            predictions
+        )
         return {
             "accuracy": accuracy,
-        }
+        }
```
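
The reformatted _compute is still the template's accuracy: the share of positions where prediction equals reference. A quick worked check of that formula on hypothetical inputs:

```python
predictions = [1, 0]
references = [1, 1]
# Position 0 matches (1 == 1), position 1 does not (0 != 1):
# 1 match out of 2 positions -> 0.5.
accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
assert accuracy == 0.5
```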
tests.py CHANGED

```diff
@@ -1,17 +1,5 @@
 test_cases = [
-    {
-        "predictions": [0, 0],
-        "references": [1, 1],
-        "result": {"metric_score": 0}
-    },
-    {
-        "predictions": [1, 1],
-        "references": [1, 1],
-        "result": {"metric_score": 1}
-    },
-    {
-        "predictions": [1, 0],
-        "references": [1, 1],
-        "result": {"metric_score": 0.5}
-    }
-]
+    {"predictions": [0, 0], "references": [1, 1], "result": {"metric_score": 0}},
+    {"predictions": [1, 1], "references": [1, 1], "result": {"metric_score": 1}},
+    {"predictions": [1, 0], "references": [1, 1], "result": {"metric_score": 0.5}},
+]
```
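
The compacted cases pair inputs with expected outputs, though they still use the template's metric_score key while _compute returns accuracy. A hypothetical harness for running them against the module (key name bridged by hand) could look like:

```python
import evaluate

from tests import test_cases  # assumes tests.py sits on the import path

module = evaluate.load("Glazkov/mars")  # or a local checkout of this repo

for case in test_cases:
    scores = module.compute(
        predictions=case["predictions"], references=case["references"]
    )
    # tests.py stores the expectation under "metric_score";
    # mars.py reports it as "accuracy".
    assert scores["accuracy"] == case["result"]["metric_score"]
```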