import ast

import evaluate
import gradio as gr

from fixed_f1 import FixedF1
from fixed_precision import FixedPrecision
from fixed_recall import FixedRecall
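
# FixedF1, FixedPrecision, and FixedRecall are the local workaround classes this Space is
# built around: each one takes its `average` strategy at construction time (see show_off()
# below), which is what lets evaluate.combine() aggregate all three metrics without the
# keyword clash described in the issue linked in the description.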

title = "'Combine' multiple metrics with this 🤗 Evaluate 🪲 Fix!"

description = """<p style='text-align: center'>
As I introduce myself to the entirety of the 🤗 ecosystem, I've put together this space to show off a temporary fix for a current 🪲 in the 🤗 Evaluate library. \n

    Check out the original, longstanding issue [here](https://github.com/huggingface/evaluate/issues/234). This details how it is currently impossible to \
'evaluate.combine()' multiple metrics related to multilabel text classification. Particularly, one cannot 'combine()' the f1, precision, and recall scores for \
evaluation. I encountered this issue specifically while training [RoBERTa-base-DReiFT](https://huggingface.co/MarioBarbeque/RoBERTa-base-DReiFT) for multilabel \
text classification of 805 labeled medical conditions based on drug reviews. \n

Try to use \t to write some code? \t or how does that work? </p>


"""

article = "<p style='text-align: center'> Check out the [original repo](https://github.com/johngrahamreynolds/FixedMetricsForHF) housing this code, and a quickly \
trained [multilabel text classification model](https://github.com/johngrahamreynolds/RoBERTa-base-DReiFT/tree/main) that makes use of it during evaluation.</p>"

def show_off(predictions, references, weighting_map):

    # The Gradio textboxes hand these over as strings, so parse them back into Python objects
    predictions = ast.literal_eval(predictions) if isinstance(predictions, str) else predictions
    references = ast.literal_eval(references) if isinstance(references, str) else references
    weighting_map = ast.literal_eval(weighting_map) if isinstance(weighting_map, str) else weighting_map

    # Each fixed metric pins its averaging strategy at construction time
    f1 = FixedF1(average=weighting_map["f1"])
    precision = FixedPrecision(average=weighting_map["precision"])
    recall = FixedRecall(average=weighting_map["recall"])

    # With the averaging already fixed, the three metrics combine cleanly
    combined = evaluate.combine([f1, recall, precision])

    # evaluate's add_batch() expects the plural keywords `predictions` and `references`
    combined.add_batch(predictions=predictions, references=references)
    outputs = combined.compute()

    return "Your metrics are as follows: \n" + str(outputs)


gr.Interface(
    fn=show_off,
    inputs=[
        gr.Textbox(label="Predictions", value="[0, 1, 2]"),
        gr.Textbox(label="References", value="[0, 1, 2]"),
        gr.Textbox(label="Averaging strategies", value='{"f1": "weighted", "precision": "micro", "recall": "weighted"}'),
    ],
    outputs="text",
    title=title,
    description=description,
    article=article,
    examples=[["[1, 0, 2, 0, 1]", "[1, 0, 0, 0, 1]", '{"f1": "weighted", "precision": "micro", "recall": "weighted"}']],
).launch()
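
# To try this Space locally (assuming the fixed_f1 / fixed_precision / fixed_recall modules
# from the repo sit next to this file, and that `evaluate`, `gradio`, and scikit-learn, which
# the underlying metrics rely on, are installed), running this script launches the interface
# on the usual local Gradio URL.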