Update app.py
app.py
CHANGED
@@ -7,7 +7,9 @@ model_name = "maulanayyy/codet5_code_translation" # Replace with the model name
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
-
+# Check if GPU is available
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model.to(device)  # Move the model to the GPU if available
 
 # Function to perform inference
 def translate_code(input_code):
@@ -15,15 +17,12 @@ def translate_code(input_code):
     # Prepare the input text
     input_text = f"translate Java to C#: {input_code}"
 
-    # Check if GPU is available
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-
     # Tokenize the input
     input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)  # Make sure input_ids are on the GPU
 
     # Generate the output
     with torch.no_grad():
-        outputs = model.generate(input_ids, max_length=
+        outputs = model.generate(input_ids, max_length=256)  # Reduce max_length if needed
 
     # Decode the output
     translated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
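For context, the two hunks fix a device mismatch and a truncated line: the model is now moved to the GPU once at load time, so the tokenized input_ids passed to model.generate live on the same device as the model weights, and the previously incomplete generate call is completed with max_length=256. Below is a minimal, self-contained sketch of the resulting script after this change. The trailing return statement, the __main__ guard, and the sample Java snippet are not part of the diff and are added here purely for illustration.

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "maulanayyy/codet5_code_translation"  # Replace with the model name you want to use

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Check if GPU is available and move the model there once, at load time
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Function to perform inference
def translate_code(input_code):
    # Prepare the input text
    input_text = f"translate Java to C#: {input_code}"

    # Tokenize the input and place it on the same device as the model
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)

    # Generate the output without tracking gradients
    with torch.no_grad():
        outputs = model.generate(input_ids, max_length=256)  # Reduce max_length if needed

    # Decode the output
    translated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return translated_code  # Assumed: the diff ends at the decode line

# Example usage (hypothetical input, not shown in the diff)
if __name__ == "__main__":
    print(translate_code("public int add(int a, int b) { return a + b; }"))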