hzhwcmhf committed on
Commit 0db6030 · 1 Parent(s): 85cdb55
Files changed (2)
  1. Dockerfile +1 -1
  2. app.py +2 -2
Dockerfile CHANGED
@@ -8,7 +8,7 @@ WORKDIR /home/user/app
 
  RUN sed -i 's http://deb.debian.org http://cdn-aws.deb.debian.org g' /etc/apt/sources.list && sed -i 's http://archive.ubuntu.com http://us-east-1.ec2.archive.ubuntu.com g' /etc/apt/sources.list && sed -i '/security/d' /etc/apt/sources.list && apt-get update && apt-get install -y git git-lfs ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx && rm -rf /var/lib/apt/lists/* && git lfs install
 
- RUN pip install --no-cache-dir Cython "gradio==3.28.3" "torch==1.10.1"
+ RUN pip install --no-cache-dir Cython "gradio==3.28.3" "torch==1.10.1" jieba subword-nmt
 
  RUN git clone --recurse-submodules https://github.com/thu-coai/DA-Transformer.git && cd DA-Transformer && git checkout demo && pip install -e . && cd dag_search && python3 setup.py build_ext --inplace && pip install -e . && cd ../..
 
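The two added packages are Chinese-text preprocessing dependencies: jieba performs word segmentation and subword-nmt applies byte-pair encoding. A minimal sketch of how the demo could preprocess Chinese input with them, assuming a BPE codes file and a helper name that are not taken from this commit:

# Hedged sketch: segment Chinese text with jieba, then apply BPE with subword-nmt.
# "zh.bpe.codes" and preprocess_zh are placeholder assumptions, not from app.py.
import jieba
from subword_nmt.apply_bpe import BPE

with open("zh.bpe.codes", encoding="utf-8") as codes:
    bpe = BPE(codes)

def preprocess_zh(text):
    # jieba.cut yields segmented words; join them with spaces before BPE
    segmented = " ".join(jieba.cut(text.strip()))
    # process_line applies the learned BPE merges to one line of text
    return bpe.process_line(segmented)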
app.py CHANGED
@@ -28,7 +28,7 @@ from mass.s2s_model import TransformerMASSModel
  from transformer.hub_interface import TransformerHubInterface
 
  notice_markdown = ("""
- # Directed Acyclic Transformer: A Non-Autoregressive Sequence-to-Sequence Model designed for Parallel Text Generation.
+ # Directed Acyclic Transformer: A Non-Autoregressive Sequence-to-Sequence Model designed for Parallel Text Generation.
  - **Fast Generation**: DA-Transformer offers faster inference compared to autoregressive Transformers (with fairseq implementation), with a reduction in latency by 7~14x and an increase in throughput by ~20x.
  - **High Quality**: DA-Transformer performs competitively with autoregressive Transformers, even with pre-trained models like BART, in a variety of text generation tasks.
  - **Easy Training**: DA-Transformer can be trained end-to-end without requiring knowledge distillation, making it simple and straightforward to train.
@@ -527,7 +527,7 @@ def build_demo():
 
  gr.Markdown(learn_more_markdown)
 
- compare_load(demo.load)
+ detail_load(demo.load)
 
  demo.load(None,None,None,_js=open("global.js").read())
  return demo
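For reference, the demo.load call kept in this hunk follows the Gradio 3.x pattern of running custom JavaScript on page load via the _js argument. A minimal, self-contained sketch of that pattern with the gradio==3.28.3 pinned in the Dockerfile; only the _js call mirrors app.py, the Blocks content is illustrative:

# Hedged sketch of the pattern used in app.py with gradio==3.28.3:
# passing _js to demo.load runs the JavaScript snippet when the page loads.
import gradio as gr

def build_demo():
    with gr.Blocks() as demo:
        gr.Markdown("DA-Transformer demo")  # placeholder content, not the real UI
        # fn/inputs/outputs are None, so only the JavaScript from global.js executes on load
        demo.load(None, None, None, _js=open("global.js").read())
    return demo

if __name__ == "__main__":
    build_demo().launch()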