Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,34 +1,15 @@
|
|
1 |
from huggingface_hub import from_pretrained_keras
|
2 |
import gradio as gr
|
3 |
-
import json
|
4 |
-
|
5 |
-
# Hugging Face repo id that hosts the transliteration model and its
# tokenizer artifacts.
model_directory = "Bajiyo/Malayalam_transliteration"

# Repo-relative locations of the serialized tokenizer configurations.
# NOTE(review): these are not local filesystem paths unless the repo has
# been downloaded -- confirm the JSON files exist where open() below looks.
source_tokenizer_config_path = model_directory + "/source_tokenizer_config.json"
target_tokenizer_config_path = model_directory + "/target_tokenizer_config.json"
|
11 |
-
|
12 |
-
# Deserialize the two tokenizer configurations from disk.
# NOTE(review): open() resolves these paths relative to the working
# directory; if the JSON files were never fetched this raises
# FileNotFoundError at import time -- a plausible cause of the Space's
# startup "Runtime error".
with open(source_tokenizer_config_path, "r") as config_fh:
    source_tokenizer_config = json.load(config_fh)

with open(target_tokenizer_config_path, "r") as config_fh:
    target_tokenizer_config = json.load(config_fh)
18 |
|
19 |
# Fetch and instantiate the Keras model from the Hugging Face Hub.
# This runs at import time, so app startup blocks on the download.
model = from_pretrained_keras("Bajiyo/Malayalam_transliteration")
|
21 |
|
22 |
def transliterate(input_text):
    """Transliterate a user-supplied string with the loaded Keras model.

    Parameters
    ----------
    input_text : str
        Raw text submitted by the user (e.g. from a Gradio textbox).

    Returns
    -------
    The decoded text produced by ``post_process_predictions``.

    NOTE(review): ``tokenize_input`` and ``post_process_predictions`` are
    neither defined nor imported anywhere in this file, so calling this
    function raises NameError -- likely the cause of the Space's runtime
    error. Those helpers must be implemented or imported.
    """
    # Encode the raw text into the tensor format the model expects.
    inputs = tokenize_input(input_text, source_tokenizer_config)

    # Run the model on the encoded input.
    predictions = model.predict(inputs)

    # Decode the model output back into text.
    # BUG FIX: the original line was missing its closing parenthesis
    # ("post_process_predictions(predictions"), which made the entire
    # file a SyntaxError.
    output_text = post_process_predictions(predictions)

    return output_text
34 |
|
|
|
1 |
from huggingface_hub import from_pretrained_keras
|
2 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
# Download and build the pretrained Keras model from the Hugging Face Hub.
# Executed at module import, so the Space blocks on this before serving.
model = from_pretrained_keras("Bajiyo/Malayalam_transliteration")
|
6 |
|
7 |
def transliterate(input_text):
    """Gradio handler: transliterate the submitted text with the model.

    NOTE(review): ``model.predict`` is handed the raw string here -- a
    Keras model normally needs tokenized/padded integer tensors, so this
    call likely fails; the tokenization step removed in this commit may
    need to be restored. TODO confirm.
    NOTE(review): ``post_process_predictions`` is not defined or imported
    in this file, so this function raises NameError when invoked --
    consistent with the Space's "Runtime error" status.
    """
    # Run the model directly on the input, then decode its output.
    raw_predictions = model.predict(input_text)
    return post_process_predictions(raw_predictions)
|