Aye10032 commited on
Commit
afa91bc
·
1 Parent(s): f1c42ea
Files changed (6) hide show
  1. .gitignore +164 -0
  2. .python-version +1 -0
  3. main.py +6 -0
  4. pyproject.toml +9 -0
  5. top5_error_rate.py +79 -0
  6. uv.lock +0 -0
.gitignore ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### Python template
2
+ # Byte-compiled / optimized / DLL files
3
+ __pycache__/
4
+ *.py[cod]
5
+ *$py.class
6
+
7
+ # C extensions
8
+ *.so
9
+
10
+ # Distribution / packaging
11
+ .Python
12
+ build/
13
+ develop-eggs/
14
+ dist/
15
+ downloads/
16
+ eggs/
17
+ .eggs/
18
+ lib/
19
+ lib64/
20
+ parts/
21
+ sdist/
22
+ var/
23
+ wheels/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+ cover/
54
+
55
+ # Translations
56
+ *.mo
57
+ *.pot
58
+
59
+ # Django stuff:
60
+ *.log
61
+ local_settings.py
62
+ db.sqlite3
63
+ db.sqlite3-journal
64
+
65
+ # Flask stuff:
66
+ instance/
67
+ .webassets-cache
68
+
69
+ # Scrapy stuff:
70
+ .scrapy
71
+
72
+ # Sphinx documentation
73
+ docs/_build/
74
+
75
+ # PyBuilder
76
+ .pybuilder/
77
+ target/
78
+
79
+ # Jupyter Notebook
80
+ .ipynb_checkpoints
81
+
82
+ # IPython
83
+ profile_default/
84
+ ipython_config.py
85
+
86
+ # pyenv
87
+ # For a library or package, you might want to ignore these files since the code is
88
+ # intended to run in multiple environments; otherwise, check them in:
89
+ # .python-version
90
+
91
+ # pipenv
92
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
94
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
95
+ # install all needed dependencies.
96
+ #Pipfile.lock
97
+
98
+ # poetry
99
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
100
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
101
+ # commonly ignored for libraries.
102
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
103
+ #poetry.lock
104
+
105
+ # pdm
106
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
107
+ #pdm.lock
108
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
109
+ # in version control.
110
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
111
+ .pdm.toml
112
+ .pdm-python
113
+ .pdm-build/
114
+
115
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116
+ __pypackages__/
117
+
118
+ # Celery stuff
119
+ celerybeat-schedule
120
+ celerybeat.pid
121
+
122
+ # SageMath parsed files
123
+ *.sage.py
124
+
125
+ # Environments
126
+ .env
127
+ .venv
128
+ env/
129
+ venv/
130
+ ENV/
131
+ env.bak/
132
+ venv.bak/
133
+
134
+ # Spyder project settings
135
+ .spyderproject
136
+ .spyproject
137
+
138
+ # Rope project settings
139
+ .ropeproject
140
+
141
+ # mkdocs documentation
142
+ /site
143
+
144
+ # mypy
145
+ .mypy_cache/
146
+ .dmypy.json
147
+ dmypy.json
148
+
149
+ # Pyre type checker
150
+ .pyre/
151
+
152
+ # pytype static type analyzer
153
+ .pytype/
154
+
155
+ # Cython debug symbols
156
+ cython_debug/
157
+
158
+ # PyCharm
159
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
162
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163
+ #.idea/
164
+
.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.13
main.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
"""Entry point: load the top-5 error-rate metric and serve an interactive demo."""

import evaluate
from evaluate.utils import launch_gradio_widget


# Fetch the metric implementation from the Hub, then expose it in a Gradio UI.
module = evaluate.load("Aye10032/top5_error_rate")
launch_gradio_widget(module)
pyproject.toml ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "top5-error-rate"
3
+ version = "0.1.0"
4
+ description = "Evaluate-compatible metric computing the top-5 error rate from class probability predictions"
5
+ readme = "README.md"
6
+ requires-python = ">=3.13"
7
+ dependencies = [
8
+ "evaluate[template]>=0.4.3",
9
+ ]
top5_error_rate.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Any
2
+
3
+ import datasets
4
+ import evaluate
5
+ import numpy as np
6
+ from evaluate.utils.file_utils import add_start_docstrings
7
+
8
+ _DESCRIPTION = """
9
+ The "top-5 error" is the percentage of times that the target label does not appear among the 5 highest-probability predictions. It can be computed with:
10
+ Top-5 Error Rate = 1 - Top-5 Accuracy
11
+ or equivalently:
12
+ Top-5 Error Rate = (Number of incorrect top-5 predictions) / (Total number of cases processed)
13
+ Where:
14
+ - Top-5 Accuracy: The proportion of cases where the true label is among the model's top 5 predicted classes.
15
+ - Incorrect top-5 prediction: The true label is not in the top 5 predicted classes (ranked by probability).
16
+ """
17
+
18
+
19
+ _KWARGS_DESCRIPTION = """
20
+ Args:
21
+ predictions (`list` of `int`): Predicted labels.
22
+ references (`list` of `int`): Ground truth labels.
23
+ Returns:
24
+ accuracy (`float` or `int`): Accuracy score. Minimum possible value is 0. Maximum possible value is 1.0, or the number of examples input.
25
+ Examples:
26
+ >>> accuracy_metric = evaluate.load("accuracy")
27
+ >>> results = accuracy_metric.compute(references=[0, 1, 2, 0, 1, 2], predictions=[0, 1, 1, 2, 1, 0])
28
+ >>> print(results)
29
+ {'accuracy': 0.5}
30
+ """
31
+
32
+
33
+ _CITATION = """
34
+ """
35
+
36
+
37
@add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Top5ErrorRate(evaluate.Metric):
    """Top-5 error rate: the fraction of examples whose true label is not
    among the 5 highest-scoring predicted classes."""

    def _info(self):
        """Describe this metric's input schema for the `evaluate` framework."""
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # `predictions` is a per-example vector of float class scores and
            # `references` a scalar class index, matching what `_compute`
            # consumes; the previous declaration (both int32 Sequences) did
            # not match the float score vectors / scalar labels used below.
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        *,
        predictions: list[list[float]] = None,
        references: list[int] = None,
        **kwargs,
    ) -> Dict[str, Any]:
        """Compute the top-5 error rate.

        Args:
            predictions: Per-example class scores, shape (n_examples, n_classes).
            references: True class index for each example.

        Returns:
            ``{"top5_error_rate": float}`` with a value in [0.0, 1.0].
        """
        # Normalize inputs to numpy arrays for vectorized work.
        predictions = np.asarray(predictions)
        references = np.asarray(references)

        # Guard: no examples means no errors (avoids division by zero).
        if references.size == 0:
            return {"top5_error_rate": 0.0}

        # Indices of the (up to) 5 highest-scoring classes per example.
        # With fewer than 5 classes, `[:, -5:]` simply keeps them all.
        top5_pred = np.argsort(predictions, axis=1)[:, -5:]

        # Broadcast-compare: each row contains at most one match, so the
        # total count equals the number of correct top-5 predictions.
        correct = int(np.sum(top5_pred == references[:, None]))

        error_rate = 1.0 - correct / references.size

        return {"top5_error_rate": float(error_rate)}
uv.lock ADDED
The diff for this file is too large to render. See raw diff