Emma0123 committed
Commit e936e9a · verified · 1 Parent(s): 0aed71c

Update app.py

Files changed (1)
  1. app.py +15 -18
app.py CHANGED
@@ -1,23 +1,20 @@
-tokenizer1 = AutoTokenizer.from_pretrained("Emma0123/fine_tuned_model")
-model1 = AutoModelForSequenceClassification.from_pretrained("Emma0123/fine_tuned_model")
-
-tokenizer2 = AutoTokenizer.from_pretrained("jonas/roberta-base-finetuned-sdg")
-model2 = AutoModelForSequenceClassification.from_pretrained("jonas/roberta-base-finetuned-sdg")
-
-# Input text
-input_text = input()
-
-# Run inference with the first model
-inputs = tokenizer1(input_text, return_tensors="pt", truncation=True)
-outputs = model1(**inputs)
-predictions = torch.argmax(outputs.logits, dim=1).item()
-
-# Branch on the first model's output
-if predictions == 1:
-    # Classify with the second model
-    inputs2 = tokenizer2(input_text, return_tensors="pt", truncation=True)
-    outputs2 = model2(**inputs2)
-    predictions2 = torch.argmax(outputs2.logits, dim=1).item()
-    print("Second model prediction:", predictions2)
+from transformers import pipeline
+
+# Initialize the two models
+pipe1 = pipeline("text-classification", model="Emma0123/fine_tuned_model")
+pipe2 = pipeline("text-classification", model="jonas/roberta-base-finetuned-sdg")
+
+# Get the text entered by the user
+input_text = input("Please enter the text: ")
+
+# Run prediction with the first model
+result1 = pipe1(input_text)
+
+# Check the first model's output
+if result1[0]['label'] == 'LABEL_1':  # adjust to the label your model actually returns
+    # If the result is 1 (or its corresponding label), pass the input text to the second model
+    result2 = pipe2(input_text)
+    print(result2)
 else:
-    print("This content is unrelated to Environment.")
+    # If the result is 0 (or its corresponding label), print a notice
+    print("This content is unrelated to Environment.")