trichter commited on
Commit
d317cdd
·
verified ·
1 Parent(s): 4f4193a

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +12 -0
README.md CHANGED
@@ -45,17 +45,29 @@ Max Sequence Length: 512
45
  Example usage:
46
 
47
  base_model_name = 't5-large'
 
48
  tokenizer = T5Tokenizer.from_pretrained(base_model_name, model_max_length=512)
 
49
  model = T5ForConditionalGeneration.from_pretrained('trichter/t5-DistillingSbS-ABSA')
 
50
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
51
  model.to(device)
 
52
  examples = {'appName': ['Google Chrome', 'Google Chrome'], 'review': ['This app is great, the speed is unmatched', 'Bad app, crashes constantly']}
 
53
  model_inputs = tokenize_function(examples) # assuming examples has the fields 'appName' and 'review'. tokenize_function is in the GitHub repo in data_utils.py.
54
 
55
  outputs = generate(model, model_inputs, return_type = 'labels') # generate() is in the GitHub repo and generates either labels or rationales depending on return_type. Default is 'labels' but can be changed to 'rationales'
 
56
  tokenizer.decode(outputs[0], skip_special_tokens=True) # prints '"speed": "positive"'
 
57
  tokenizer.decode(outputs[1], skip_special_tokens=True) # prints '"crashes": "negative"'
58
 
 
 
59
  outputs = generate(model, model_inputs, return_type = 'rationales')
 
60
  tokenizer.decode(outputs[0], skip_special_tokens=True) # prints '"speed": "the review explicitly mentions that the speed of the app is unmatched, indicating satisfaction with its performance in terms of speed."'
 
61
  tokenizer.decode(outputs[1], skip_special_tokens=True) # prints '"crashes": "the app crashing constantly is explicitly mentioned as a major issue, indicating dissatisfaction with its stability."'
 
45
  Example usage:
46
 
47
  base_model_name = 't5-large'
48
+
49
  tokenizer = T5Tokenizer.from_pretrained(base_model_name, model_max_length=512)
50
+
51
  model = T5ForConditionalGeneration.from_pretrained('trichter/t5-DistillingSbS-ABSA')
52
+
53
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
54
+
55
  model.to(device)
56
+
57
  examples = {'appName': ['Google Chrome', 'Google Chrome'], 'review': ['This app is great, the speed is unmatched', 'Bad app, crashes constantly']}
58
+
59
  model_inputs = tokenize_function(examples) # assuming examples has the fields 'appName' and 'review'. tokenize_function is in the GitHub repo in data_utils.py.
60
 
61
  outputs = generate(model, model_inputs, return_type = 'labels') # generate() is in the GitHub repo and generates either labels or rationales depending on return_type. Default is 'labels' but can be changed to 'rationales'
62
+
63
  tokenizer.decode(outputs[0], skip_special_tokens=True) # prints '"speed": "positive"'
64
+
65
  tokenizer.decode(outputs[1], skip_special_tokens=True) # prints '"crashes": "negative"'
66
 
67
+
68
+
69
  outputs = generate(model, model_inputs, return_type = 'rationales')
70
+
71
  tokenizer.decode(outputs[0], skip_special_tokens=True) # prints '"speed": "the review explicitly mentions that the speed of the app is unmatched, indicating satisfaction with its performance in terms of speed."'
72
+
73
  tokenizer.decode(outputs[1], skip_special_tokens=True) # prints '"crashes": "the app crashing constantly is explicitly mentioned as a major issue, indicating dissatisfaction with its stability."'