cache_resource
Browse files
app.py
CHANGED
@@ -16,7 +16,7 @@ os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
|
16 |
random.seed(None)
|
17 |
suggested_text_list = ['פעם אחת, לפני שנים רבות','שלום, קוראים לי דורון ואני','בוקר טוב לכולם','ואז הפרתי את כל כללי הטקס כש']
|
18 |
|
19 |
-
@st.
|
20 |
def load_model(model_name):
|
21 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
22 |
model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
16 |
random.seed(None)
|
17 |
suggested_text_list = ['פעם אחת, לפני שנים רבות','שלום, קוראים לי דורון ואני','בוקר טוב לכולם','ואז הפרתי את כל כללי הטקס כש']
|
18 |
|
19 |
+
@st.cache_resource
|
20 |
def load_model(model_name):
|
21 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
22 |
model = AutoModelForCausalLM.from_pretrained(model_name)
|