TKgumi committed on
Commit
c423e28
·
verified ·
1 Parent(s): bdec963

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -5
app.py CHANGED
@@ -1,12 +1,17 @@
1
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
2
 
3
- MODEL_NAME = "Lightblue/DeepSeek-R1-Distill-Qwen-7B-Japanese"
4
 
5
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
6
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 
 
 
 
7
 
8
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
9
 
10
- prompt = "こんにちは、これはテストです。"
11
- result = generator(prompt, max_length=100)[0]['generated_text']
12
- print(result)
 
1
  from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
2
+ import torch
3
 
4
+ MODEL_NAME = "rinna/japanese-gpt-0.5b" # 500Mの軽量モデル
5
 
6
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
7
+ model = AutoModelForCausalLM.from_pretrained(
8
+ MODEL_NAME,
9
+ torch_dtype=torch.float16, # メモリ節約
10
+ low_cpu_mem_usage=True # メモリ圧縮
11
+ )
12
 
13
  generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
14
 
15
+ def generate_text(prompt: str, max_length: int = 100):
16
+ return generator(prompt, max_length=max_length)[0]['generated_text']
17
+