Update app.py
Browse files
app.py
CHANGED
@@ -33,14 +33,16 @@ model_list = [
|
|
33 |
]
|
34 |
|
35 |
def predict(text, target, chosen_model):
|
36 |
-
model1 = tf.keras.models.load_model(chosen_model, custom_objects={"TFBertModel": TFBertModel})
|
|
|
|
|
37 |
checkpoint = "neuralmind/bert-base-portuguese-cased"
|
38 |
tokenizer = AutoTokenizer.from_pretrained(checkpoint, use_fast=True, model_max_length=512)
|
39 |
-
tokpair = tokenizer(text, target, truncation=True, padding='max_length', return_tensors='tf')
|
40 |
|
41 |
-
outp = model1(tokpair)
|
42 |
|
43 |
-
proto_tensor = tf.make_tensor_proto(outp)
|
44 |
allscores = tf.make_ndarray(proto_tensor)[0]
|
45 |
|
46 |
scores_dict = {
|
|
|
33 |
]
|
34 |
|
35 |
def predict(text, target, chosen_model):
|
36 |
+
# model1 = tf.keras.models.load_model(chosen_model, custom_objects={"TFBertModel": TFBertModel})
|
37 |
+
model1 = from_pretrained_keras(chosen_model)
|
38 |
+
|
39 |
checkpoint = "neuralmind/bert-base-portuguese-cased"
|
40 |
tokenizer = AutoTokenizer.from_pretrained(checkpoint, use_fast=True, model_max_length=512)
|
41 |
+
tokpair = tokenizer(text, target, truncation=True, padding='max_length', return_tensors='tf', return_token_type_ids=False)
|
42 |
|
43 |
+
outp = model1.signatures["serving_default"](**tokpair)
|
44 |
|
45 |
+
proto_tensor = tf.make_tensor_proto(outp['outp'])
|
46 |
allscores = tf.make_ndarray(proto_tensor)[0]
|
47 |
|
48 |
scores_dict = {
|