Seunggg committed
Commit 8f62c1d · verified · 1 Parent(s): fd45282

Update app.py

Files changed (1):
  1. app.py +16 -16
app.py CHANGED
@@ -2,24 +2,24 @@ import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-model_id = "your-username/lora-plant-deepseek"
-
-# Load the model
+# Your own model repo
+model_id = "Seunggg/lora-plant"
+
+# Load the model and tokenizer
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
+model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, device_map="auto")
 
-def plant_ask(user_input):
-    prompt = f"User question: {user_input}\nPlease answer in plain, friendly language and suggest some plant reference materials to consult.\nAnswer:"
+# Define the interface function
+def plant_chat(user_input):
+    prompt = f"User question: {user_input}\nPlease answer in plain, friendly language and recommend related plant resources or literature:\nAnswer:"
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-    outputs = model.generate(**inputs, max_new_tokens=300)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+    outputs = model.generate(**inputs, max_new_tokens=256)
+    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return answer
 
-iface = gr.Interface(
-    fn=plant_ask,
-    inputs="text",
-    outputs="text",
-    title="🌱 Plant Assistant Q&A System",
-    description="Ask anything about plant care, growing conditions, pest and disease control, and more; I will do my best to give friendly advice and literature recommendations.",
-)
-
-iface.launch()
+# Launch the Gradio interface
+gr.Interface(fn=plant_chat,
+             inputs="text",
+             outputs="text",
+             title="🌿 Plant Q&A Assistant",
+             description="Provides plant care advice and literature pointers based on your question.").launch()
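
For a quick check of the new generation path outside the Space, a standalone script along these lines should work. It is a minimal sketch that mirrors the updated app.py (same model repo, dtype fallback, prompt template, and max_new_tokens=256); the sample question is illustrative, and device_map="auto" assumes the accelerate package is installed, as the Space itself requires.

# Minimal local smoke test for the updated logic (a sketch; mirrors app.py).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Seunggg/lora-plant"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    # The dtype fallback introduced in this commit: fp16 on GPU, fp32 on CPU.
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)

question = "How often should I water a succulent?"  # illustrative input
prompt = f"User question: {question}\nPlease answer in plain, friendly language and recommend related plant resources or literature:\nAnswer:"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Note that decode runs over the full output sequence, so the printed text includes the prompt followed by the generated answer, matching the Space's current behavior.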