Abdulmohsena committed on
Commit 565b974 · 1 Parent(s): 6b4436c
Files changed (4)
  1. .gitignore +1 -0
  2. README.md +5 -41
  3. classicier.py +43 -90
  4. requirements.txt +4 -1
.gitignore ADDED
@@ -0,0 +1 @@
+ train
README.md CHANGED
@@ -1,50 +1,14 @@
  ---
  title: classicier
- datasets:
- -
  tags:
  - evaluate
  - metric
- description: "TODO: add a description here"
  sdk: gradio
  sdk_version: 3.19.1
  app_file: app.py
  pinned: false
- ---
-
- # Metric Card for classicier
-
- ***Module Card Instructions:*** *Fill out the following subsections. Feel free to take a look at existing metric cards if you'd like examples.*
-
- ## Metric Description
- *Give a brief overview of this metric, including what task(s) it is usually used for, if any.*
-
- ## How to Use
- *Give a general statement of how to use the metric*
-
- *Provide the simplest possible example for using the metric*
-
- ### Inputs
- *List all input arguments in the format below*
- - **input_field** *(type): Definition of input, with explanation if necessary. State any default value(s).*
-
- ### Output Values
-
- *Explain what this metric outputs and provide an example of what the metric output looks like. Modules should return a dictionary with one or multiple key-value pairs, e.g. {"bleu" : 6.02}*
-
- *State the range of possible values that the metric's output can take, as well as what in that range is considered good. For example: "This metric can take on any value between 0 and 100, inclusive. Higher scores are better."*
-
- #### Values from Popular Papers
- *Give examples, preferably with links to leaderboards or publications, to papers that have reported this metric, along with the values they have reported.*
-
- ### Examples
- *Give code examples of the metric being used. Try to include examples that clear up any potential ambiguity left from the metric description above. If possible, provide a range of examples that show both typical and atypical results, as well as examples where a variety of input parameters are passed.*
-
- ## Limitations and Bias
- *Note any known limitations or biases that the metric has, with links and references if possible.*
-
- ## Citation
- *Cite the source where this metric was introduced.*
-
- ## Further References
- *Add any useful further references.*

  ---
  title: classicier
+ emoji:
+ colorFrom: blue
+ colorTo: pink
  tags:
  - evaluate
  - metric
+ description: Classify if a given sentence is in Classical Arabic or not
  sdk: gradio
  sdk_version: 3.19.1
  app_file: app.py
  pinned: false
+ ---
classicier.py CHANGED
@@ -1,95 +1,48 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """TODO: Add a description here."""
-
  import evaluate
  import datasets

-
- # TODO: Add BibTeX citation
- _CITATION = """\
- @InProceedings{huggingface:module,
-     title = {A great new module},
-     authors={huggingface, Inc.},
-     year={2020}
- }
- """
-
- # TODO: Add description of the module here
- _DESCRIPTION = """\
- This new module is designed to solve this great ML task and is crafted with a lot of care.
- """
-
-
- # TODO: Add description of the arguments of the module here
- _KWARGS_DESCRIPTION = """
- Calculates how good predictions are given some references, using certain scores
- Args:
-     predictions: list of predictions to score. Each prediction
-         should be a string with tokens separated by spaces.
-     references: list of references, one for each prediction. Each
-         reference should be a string with tokens separated by spaces.
- Returns:
-     accuracy: description of the first score,
-     another_score: description of the second score,
- Examples:
-     Examples should be written in doctest format, and should illustrate how
-     to use the function.
-
-     >>> my_new_module = evaluate.load("my_new_module")
-     >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
-     >>> print(results)
-     {'accuracy': 1.0}
- """
-
- # TODO: Define external resources urls if needed
- BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
-
-
- @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
- class classicier(evaluate.Metric):
-     """TODO: Short description of my evaluation module."""
-
      def _info(self):
-         # TODO: Specifies the evaluate.EvaluationModuleInfo object
-         return evaluate.MetricInfo(
-             # This is the description that will appear on the modules page.
-             module_type="metric",
-             description=_DESCRIPTION,
-             citation=_CITATION,
-             inputs_description=_KWARGS_DESCRIPTION,
-             # This defines the format of each prediction and reference
-             features=datasets.Features({
-                 'predictions': datasets.Value('int64'),
-                 'references': datasets.Value('int64'),
-             }),
-             # Homepage of the module for documentation
-             homepage="http://module.homepage",
-             # Additional links to the codebase or references
-             codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
-             reference_urls=["http://path.to.reference.url/new_module"]
          )
-
-     def _download_and_prepare(self, dl_manager):
-         """Optional: download external resources useful to compute the scores"""
-         # TODO: Download external resources if needed
-         pass
-
-     def _compute(self, predictions, references):
-         """Returns the scores"""
-         # TODO: Compute the different scores of the module
-         accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
-         return {
-             "accuracy": accuracy,
-         }

  import evaluate
  import datasets
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch

+ class Classicier(evaluate.Measurement):
+
      def _info(self):
+         return evaluate.MeasurementInfo(
+             description="",
+             citation="",
+             inputs_description="",
+             features=datasets.Features(
+                 {
+                     "texts": datasets.Value("string", id="sequence"),
+                 }
+             ),
+             reference_urls=[],
          )
+
+     def _download_and_prepare(self, dl_manager, device=None):
+         if device is None:
+             device = "cuda" if torch.cuda.is_available() else "cpu"
+
+         # Load the tokenizer and model from the specified repository
+         self.tokenizer = AutoTokenizer.from_pretrained("AbdulmohsenA/classicier")
+         self.model = AutoModelForSequenceClassification.from_pretrained("AbdulmohsenA/classicier")
+
+         self.model.to(device)
+         self.device = device
+
+     def _compute(self, texts, temperature=2):
+         device = self.device
+
+         inputs = self.tokenizer(
+             texts,
+             return_tensors="pt",
+             truncation=True,
+             padding='max_length',
+             max_length=128
+         ).to(device)
+
+         with torch.no_grad():
+             output = self.model(**inputs)
+             prediction = torch.softmax(output.logits / temperature, dim=-1)
+             classical_prob = prediction[:, 1].detach().cpu().numpy()
+
+         return {"classical_score": classical_prob}
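For reference, a minimal usage sketch of the rewritten module (not part of this commit; the load path and sample input below are assumptions, while the `texts` argument and `classical_score` key follow the code above):

import evaluate

# Assumed load path: community evaluate modules resolve to a Space,
# so this should point at this repo (or at a local copy of classicier.py).
classicier = evaluate.load("Abdulmohsena/classicier", module_type="measurement")

# compute() tokenizes the inputs, runs the classifier, and returns one
# probability per sentence; logits are divided by temperature=2 before
# the softmax, which softens (de-saturates) the scores.
results = classicier.compute(texts=["an illustrative input sentence"])
print(results["classical_score"])  # values near 1.0 suggest Classical Arabic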
requirements.txt CHANGED
@@ -1 +1,4 @@
- git+https://github.com/huggingface/evaluate@main
+ evaluate
+ transformers
+ torch
+ datasets