Update app.py
app.py
CHANGED
@@ -7,23 +7,33 @@ model_name = "maulanayyy/codet5_code_translation" # Replace with the appropriate model name
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
+print("Model and tokenizer loaded successfully.")
+
 # Function to perform inference
 def translate_code(input_code):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        # Prepare the input text
+        input_text = f"translate Java to C#: {input_code}"
+
+        # Check if GPU is available
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+
+        # Tokenize the input
+        input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)  # Make sure input_ids is on the GPU
+
+        # Generate the output
+        with torch.no_grad():
+            outputs = model.generate(input_ids, max_length=512)
+
+        # Decode the output
+        translated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return translated_code
+    except Exception as e:
+        print(f"Error during translation: {e}")
+        return "An error occurred during translation."
+
+# Create Gradio interface
+demo = gr.Interface(fn=translate_code, inputs="text", outputs="text", title="Java to C# Code Translator", description="Enter Java code to translate it to C#.")
 
-#
+# Launch the interface
+demo.launch(share=True)
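
For orientation, below is a sketch of the complete app.py that the new side of this hunk implies. It is not the committed file: the import lines sit above the hunk and are assumed here, and the model.to(device) call is an addition of this sketch. The committed lines move input_ids to the detected device but never move the model, so on a CUDA machine model.generate would likely raise a device-mismatch error unless the model is placed on the GPU elsewhere in the file.

# Sketch only: the imports and the model.to(device) line are assumptions,
# not part of the commit shown above.
import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "maulanayyy/codet5_code_translation"  # replace with the appropriate model name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Assumption: keep the model on the same device as the tokenized inputs.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
print("Model and tokenizer loaded successfully.")

def translate_code(input_code):
    try:
        # CodeT5-style task prefix followed by the Java source to translate
        input_text = f"translate Java to C#: {input_code}"
        input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
        with torch.no_grad():
            outputs = model.generate(input_ids, max_length=512)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        print(f"Error during translation: {e}")
        return "An error occurred during translation."

demo = gr.Interface(
    fn=translate_code,
    inputs="text",
    outputs="text",
    title="Java to C# Code Translator",
    description="Enter Java code to translate it to C#.",
)
demo.launch()

The sketch launches with demo.launch() rather than demo.launch(share=True): on a hosted Space the app is already served publicly, so share=True generally has no effect there.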
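Once the Space is running, the default gr.Interface exposes a /predict endpoint that can also be called programmatically. A minimal sketch using the gradio_client package; the Space id below is a placeholder for illustration, since the actual id is not shown in this commit.

from gradio_client import Client

# Placeholder Space id; substitute the real one.
client = Client("maulanayyy/java-to-csharp-translator")

java_snippet = "public int Add(int a, int b) { return a + b; }"
result = client.predict(java_snippet, api_name="/predict")
print(result)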