dongxiaoqun committed
Commit 519c30a · 1 Parent(s): b2cca37
Update README.md
README.md CHANGED
@@ -24,50 +24,9 @@ Task: Summarization
 ```python
 from transformers import PegasusForConditionalGeneration, BertTokenizer
 from typing import List, Optional
-
-# Note: you need to download tokenizers_pegasus.py and other Python scripts from the Fengshenbang-LM GitHub repo in advance,
-# or you can download tokenizers_pegasus.py and data_utils.py from https://huggingface.co/IDEA-CCNL/Randeng_Pegasus_523M_Summary/tree/main
-# We strongly recommend that you git clone the Fengshenbang-LM repo:
-# 1. git clone https://github.com/IDEA-CCNL/Fengshenbang-LM
-# 2. cd Fengshenbang-LM/fengshen/examples/pegasus/
-# where you will find the tokenizers_pegasus.py and data_utils.py that the Pegasus model needs.
-# from tokenizers_pegasus import PegasusTokenizer
-class PegasusTokenizer(BertTokenizer):
-    model_input_names = ["input_ids", "attention_mask"]
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-        self.add_special_tokens({'additional_special_tokens': ["<mask_1>"]})
-
-    def build_inputs_with_special_tokens(
-            self,
-            token_ids_0: List[int],
-            token_ids_1: Optional[List[int]] = None) -> List[int]:
-
-        if token_ids_1 is None:
-            return token_ids_0 + [self.eos_token_id]
-        return token_ids_0 + token_ids_1 + [self.eos_token_id]
-
-    def _special_token_mask(self, seq):
-        all_special_ids = set(
-            self.all_special_ids)  # call it once instead of inside the list comp
-        # all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
-        return [1 if x in all_special_ids else 0 for x in seq]
-
-    def get_special_tokens_mask(
-            self,
-            token_ids_0: List[int],
-            token_ids_1: Optional[List[int]] = None,
-            already_has_special_tokens: bool = False) -> List[int]:
-        if already_has_special_tokens:
-            return self._special_token_mask(token_ids_0)
-        elif token_ids_1 is None:
-            return self._special_token_mask(token_ids_0) + [1]
-        else:
-            return self._special_token_mask(token_ids_0 +
-                                            token_ids_1) + [1]
 
 model = PegasusForConditionalGeneration.from_pretrained('dongxq/test_model')
-tokenizer =
+tokenizer = BertTokenizer.from_pretrained('dongxq/test_model')
 
 text = "在北京冬奥会自由式滑雪女子坡面障碍技巧决赛中,中国选手谷爱凌夺得银牌。祝贺谷爱凌!今天上午,自由式滑雪女子坡面障碍技巧决赛举行。决赛分三轮进行,取选手最佳成绩排名决出奖牌。第一跳,中国选手谷爱凌获得69.90分。在12位选手中排名第三。完成动作后,谷爱凌又扮了个鬼脸,甚是可爱。第二轮中,谷爱凌在道具区第三个障碍处失误,落地时摔倒。获得16.98分。网友:摔倒了也没关系,继续加油!在第二跳失误摔倒的情况下,谷爱凌顶住压力,第三跳稳稳发挥,流畅落地!获得86.23分!此轮比赛,共12位选手参赛,谷爱凌第10位出场。网友:看比赛时我比谷爱凌紧张,加油!"
 inputs = tokenizer(text, max_length=512, return_tensors="pt")
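The updated snippet ends at `inputs = tokenizer(...)` without producing a summary. For completeness, a minimal sketch of the remaining generation step is shown below, assuming the standard `transformers` `generate`/`batch_decode` API; the `max_length` and `num_beams` values are illustrative assumptions, not part of this commit.

```python
# A minimal sketch continuing the snippet above (assumed, not part of the commit).
summary_ids = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    max_length=64,  # assumed cap on summary length
    num_beams=4,    # assumed beam search width
)
summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]
print(summary)
```

Since `BertTokenizer` tokenizes Chinese character by character, the decoded summary may contain spaces between characters; stripping them with `summary.replace(" ", "")` may be needed.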