Update README.md
Browse files
README.md
CHANGED
@@ -47,24 +47,16 @@ Example usage:
|
|
47 |
base_model_name = 't5-large'
|
48 |
tokenizer = T5Tokenizer.from_pretrained(base_model_name, model_max_length=512)
|
49 |
model = T5ForConditionalGeneration.from_pretrained('trichter/t5-DistillingSbS-ABSA')
|
50 |
-
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
examples = {'appName': ['Google Chrome', 'Google Chrome'], 'review': ['This app is great, the speed is unmatched', 'Bad app, crashes constantly']}
|
55 |
-
|
56 |
model_inputs = tokenize_function(examples) # assuming example has the fields 'appName' and 'review'. tokenize_function is in the GitHub repo in data_utils.py.
|
57 |
|
58 |
-
outputs = generate(model, model_inputs, return_type = 'labels') # generate() is in the GitHub repo and generates either labels or rationales depending on return_type. Default is 'labels' but can be changed to 'rationales'
|
59 |
-
|
60 |
-
tokenizer.decode(outputs[0], skip_special_tokens=True) # prints '"speed": "positive"'
|
61 |
-
|
62 |
-
tokenizer.decode(outputs[1], skip_special_tokens=True) # prints '"crashes": "negative"'
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
outputs = generate(model, model_inputs, return_type = 'rationales')
|
67 |
-
|
68 |
-
tokenizer.decode(outputs[0], skip_special_tokens=True) # prints '"speed": "the review explicitly mentions that the speed of the app is unmatched, indicating satisfaction with its performance in terms of speed."'
|
69 |
|
|
|
|
|
|
|
70 |
tokenizer.decode(outputs[1], skip_special_tokens=True) # prints '"crashes": "the app crashing constantly is explicitly mentioned as a major issue, indicating dissatisfaction with its stability."'
|
|
|
47 |
base_model_name = 't5-large'
|
48 |
tokenizer = T5Tokenizer.from_pretrained(base_model_name, model_max_length=512)
|
49 |
model = T5ForConditionalGeneration.from_pretrained('trichter/t5-DistillingSbS-ABSA')
|
50 |
+
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
51 |
+
model.to(device)
|
52 |
+
examples = {'appName': ['Google Chrome', 'Google Chrome'], 'review': ['This app is great, the speed is unmatched', 'Bad app, crashes constantly']}
|
|
|
|
|
|
|
53 |
model_inputs = tokenize_function(examples) # assuming example has the fields 'appName' and 'review'. tokenize_function is in the GitHub repo in data_utils.py.
|
54 |
|
55 |
+
outputs = generate(model, model_inputs, return_type = 'labels') # generate() is in the GitHub repo and generates either labels or rationales depending on return_type. Default is 'labels' but can be changed to 'rationales'
|
56 |
+
tokenizer.decode(outputs[0], skip_special_tokens=True) # prints '"speed": "positive"'
|
57 |
+
tokenizer.decode(outputs[1], skip_special_tokens=True) # prints '"crashes": "negative"'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
58 |
|
59 |
+
|
60 |
+
outputs = generate(model, model_inputs, return_type = 'rationales')
|
61 |
+
tokenizer.decode(outputs[0], skip_special_tokens=True) # prints '"speed": "the review explicitly mentions that the speed of the app is unmatched, indicating satisfaction with its performance in terms of speed."'
|
62 |
tokenizer.decode(outputs[1], skip_special_tokens=True) # prints '"crashes": "the app crashing constantly is explicitly mentioned as a major issue, indicating dissatisfaction with its stability."'
|