Pragformer committed
Commit 6f94048 · 1 Parent(s): c683650

Update app.py

Files changed (1)
  1. app.py +58 -58
app.py CHANGED
@@ -4,68 +4,68 @@ import torch
 import json
 
 # load all models
-pragformer = transformers.AutoModel.from_pretrained("Pragformer/PragFormer", trust_remote_code=True)
-pragformer_private = transformers.AutoModel.from_pretrained("Pragformer/PragFormer_private", trust_remote_code=True)
-pragformer_reduction = transformers.AutoModel.from_pretrained("Pragformer/PragFormer_reduction", trust_remote_code=True)
+# pragformer = transformers.AutoModel.from_pretrained("Pragformer/PragFormer", trust_remote_code=True)
+# pragformer_private = transformers.AutoModel.from_pretrained("Pragformer/PragFormer_private", trust_remote_code=True)
+# pragformer_reduction = transformers.AutoModel.from_pretrained("Pragformer/PragFormer_reduction", trust_remote_code=True)
 
 
 #Event Listeners
 
-tokenizer = transformers.AutoTokenizer.from_pretrained('NTUYG/DeepSCC-RoBERTa')
+# tokenizer = transformers.AutoTokenizer.from_pretrained('NTUYG/DeepSCC-RoBERTa')
 
-with open('c_data.json', 'r') as f:
-    data = json.load(f)
+# with open('c_data.json', 'r') as f:
+#     data = json.load(f)
 
-def fill_code(code_pth):
-    return data[code_pth]['pragma'], data[code_pth]['code']
+# def fill_code(code_pth):
+#     return data[code_pth]['pragma'], data[code_pth]['code']
 
 
-def predict(code_txt):
-    code = code_txt.lstrip().rstrip()
-    tokenized = tokenizer.batch_encode_plus(
-        [code],
-        max_length = 150,
-        pad_to_max_length = True,
-        truncation = True
-    )
-    pred = pragformer(torch.tensor(tokenized['input_ids']), torch.tensor(tokenized['attention_mask']))
-
-    y_hat = torch.argmax(pred).item()
-    return 'With OpenMP' if y_hat==1 else 'Without OpenMP', torch.nn.Softmax(dim=1)(pred).squeeze()[y_hat].item()
-
-
-def is_private(code_txt):
-    code = code_txt.lstrip().rstrip()
-    tokenized = tokenizer.batch_encode_plus(
-        [code],
-        max_length = 150,
-        pad_to_max_length = True,
-        truncation = True
-    )
-    pred = pragformer_private(torch.tensor(tokenized['input_ids']), torch.tensor(tokenized['attention_mask']))
-
-    y_hat = torch.argmax(pred).item()
-    if y_hat == 0:
-        return gr.update(visible=False)
-    else:
-        return gr.update(value=f"Confidence: {torch.nn.Softmax(dim=1)(pred).squeeze()[y_hat].item()}", visible=True)
-
-
-def is_reduction(code_txt):
-    code = code_txt.lstrip().rstrip()
-    tokenized = tokenizer.batch_encode_plus(
-        [code],
-        max_length = 150,
-        pad_to_max_length = True,
-        truncation = True
-    )
-    pred = pragformer_reduction(torch.tensor(tokenized['input_ids']), torch.tensor(tokenized['attention_mask']))
-
-    y_hat = torch.argmax(pred).item()
-    if y_hat == 0:
-        return gr.update(visible=False)
-    else:
-        return gr.update(value=f"Confidence: {torch.nn.Softmax(dim=1)(pred).squeeze()[y_hat].item()}", visible=True)
+# def predict(code_txt):
+#     code = code_txt.lstrip().rstrip()
+#     tokenized = tokenizer.batch_encode_plus(
+#         [code],
+#         max_length = 150,
+#         pad_to_max_length = True,
+#         truncation = True
+#     )
+#     pred = pragformer(torch.tensor(tokenized['input_ids']), torch.tensor(tokenized['attention_mask']))
+
+#     y_hat = torch.argmax(pred).item()
+#     return 'With OpenMP' if y_hat==1 else 'Without OpenMP', torch.nn.Softmax(dim=1)(pred).squeeze()[y_hat].item()
+
+
+# def is_private(code_txt):
+#     code = code_txt.lstrip().rstrip()
+#     tokenized = tokenizer.batch_encode_plus(
+#         [code],
+#         max_length = 150,
+#         pad_to_max_length = True,
+#         truncation = True
+#     )
+#     pred = pragformer_private(torch.tensor(tokenized['input_ids']), torch.tensor(tokenized['attention_mask']))
+
+#     y_hat = torch.argmax(pred).item()
+#     if y_hat == 0:
+#         return gr.update(visible=False)
+#     else:
+#         return gr.update(value=f"Confidence: {torch.nn.Softmax(dim=1)(pred).squeeze()[y_hat].item()}", visible=True)
+
+
+# def is_reduction(code_txt):
+#     code = code_txt.lstrip().rstrip()
+#     tokenized = tokenizer.batch_encode_plus(
+#         [code],
+#         max_length = 150,
+#         pad_to_max_length = True,
+#         truncation = True
+#     )
+#     pred = pragformer_reduction(torch.tensor(tokenized['input_ids']), torch.tensor(tokenized['attention_mask']))
+
+#     y_hat = torch.argmax(pred).item()
+#     if y_hat == 0:
+#         return gr.update(visible=False)
+#     else:
+#         return gr.update(value=f"Confidence: {torch.nn.Softmax(dim=1)(pred).squeeze()[y_hat].item()}", visible=True)
 
 
 # Define GUI
@@ -118,10 +118,10 @@ with gr.Blocks() as pragformer_gui:
         private = gr.Textbox(label="Private", visible=False)
         reduction = gr.Textbox(label="Reduction", visible=False)
 
-    submit_btn.click(fn=predict, inputs=code_in, outputs=[label_out, confidence_out])
-    submit_btn.click(fn=is_private, inputs=code_in, outputs=private)
-    submit_btn.click(fn=is_reduction, inputs=code_in, outputs=reduction)
-    sample_btn.click(fn=fill_code, inputs=drop, outputs=[pragma, code_in])
+    # submit_btn.click(fn=predict, inputs=code_in, outputs=[label_out, confidence_out])
+    # submit_btn.click(fn=is_private, inputs=code_in, outputs=private)
+    # submit_btn.click(fn=is_reduction, inputs=code_in, outputs=reduction)
+    # sample_btn.click(fn=fill_code, inputs=drop, outputs=[pragma, code_in])
 
 
 # pragformer_gui.launch()
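
For reference, below is a minimal sketch of the prediction path this commit disables, run outside the Gradio app. It is not part of the commit: it assumes, as the removed predict() did, that the custom PragFormer model returns raw (batch, 2) logits when called with input_ids and attention_mask, and the C snippet and variable names are illustrative only.

# Sketch only: exercise the with/without-OpenMP classifier outside Gradio.
# Assumption: the custom PragFormer head returns raw (1, 2) logits, as the removed predict() expected.
import torch
import transformers

tokenizer = transformers.AutoTokenizer.from_pretrained("NTUYG/DeepSCC-RoBERTa")
model = transformers.AutoModel.from_pretrained("Pragformer/PragFormer", trust_remote_code=True)

# Illustrative input: a simple C loop.
code = "for (int i = 0; i < n; i++) {\n    a[i] = b[i] + c[i];\n}"

enc = tokenizer(
    code.strip(),
    max_length=150,
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)

with torch.no_grad():
    logits = model(enc["input_ids"], enc["attention_mask"])  # assumed shape: (1, 2)

probs = torch.softmax(logits, dim=1).squeeze()
y_hat = int(torch.argmax(probs).item())
print("With OpenMP" if y_hat == 1 else "Without OpenMP", f"confidence={probs[y_hat].item():.3f}")

Note that the removed code tokenized with batch_encode_plus(..., pad_to_max_length=True); padding="max_length" is the current tokenizers API equivalent of that deprecated flag.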