initial commit
- .gitignore.txt +51 -0
- requirements.txt +3 -0
- translation.py +79 -0
.gitignore.txt
ADDED
@@ -0,0 +1,51 @@
+# Ignore Python cache files
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+
+# Ignore virtual environments and environment variables
+venv/
+.env
+*.env
+
+# Ignore package manager files
+poetry.lock
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Ignore Jupyter Notebook checkpoints (if using Colab or Jupyter)
+.ipynb_checkpoints/
+
+# Ignore Gradio cache and temporary files
+gradio/
+
+# Ignore Hugging Face transformers cache
+~/.cache/
+huggingface/
+transformers_cache/
+datasets_cache/
+
+# Ignore large model files
+*.pt
+*.pth
+*.bin
+*.h5
+*.onnx
+*.tflite
+models/
+checkpoints/
+
+# Ignore logs and temporary files
+*.log
+*.csv
+*.tsv
+*.sqlite
+*.db
+
+# Ignore VS Code settings (if using VS Code)
+.vscode/
+
+# Ignore system-specific files
+.DS_Store
+Thumbs.db
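Note for anyone reproducing this Space: Git only reads ignore rules from a file named exactly `.gitignore`, so as committed, `.gitignore.txt` is tracked like any other text file and these patterns have no effect until the file is renamed (e.g. `git mv .gitignore.txt .gitignore`).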
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+torch
+transformers
+gradio
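To run the app locally rather than on Hugging Face, the usual steps apply: `pip install -r requirements.txt`, then `python translation.py`. The file pins no versions, so pip will resolve the latest compatible releases.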
translation.py
ADDED
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+"""translation.ipynb
+
+Automatically generated by Colab.
+
+Original file is located at
+    https://colab.research.google.com/drive/1bKzjrfpxJqSrJQUuGR27-kRCSndSClBo
+"""
+
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+import gradio as gr
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+language_model_name = "Qwen/Qwen2-1.5B-Instruct"
+language_model = AutoModelForCausalLM.from_pretrained(
+    language_model_name,
+    torch_dtype="auto",
+    device_map="auto"
+)
+tokenizer = AutoTokenizer.from_pretrained(language_model_name)
+
+def process_input(input_text, action):
+    if action == "Translate to Japanese":
+        prompt = f"Please translate the following English text into Japanese: {input_text}"
+        lang = "ja"
+    elif action == "Translate to English":
+        prompt = f"Please translate the following Japanese text into English: {input_text}"
+        lang = "en"
+    else:
+        return "Invalid action. Please choose 'Translate to English' or 'Translate to Japanese'.", "error"
+
+    messages = [
+        {"role": "system", "content": "You are a helpful AI assistant for Language Translation."},
+        {"role": "user", "content": prompt}
+    ]
+    text = tokenizer.apply_chat_template(
+        messages,
+        tokenize=False,
+        add_generation_prompt=True
+    )
+    model_inputs = tokenizer([text], return_tensors="pt").to(device)
+
+    generated_ids = language_model.generate(
+        model_inputs.input_ids,
+        max_new_tokens=512
+    )
+    generated_ids = [
+        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+    ]
+
+    output_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+    return output_text, lang
+
+def handle_interaction(input_text, action):
+    output_text, lang = process_input(input_text, action)
+    return output_text
+
+action_options = ["Translate to English", "Translate to Japanese"]
+
+iface = gr.Interface(
+    fn=handle_interaction,
+    inputs=[
+        gr.Textbox(label="Input text"),
+        gr.Dropdown(action_options, label="Select action")
+    ],
+    outputs=[
+        gr.Textbox(label="Output text"),
+    ],
+    title="Translation App using AI",
+    description="Translate input text based on the selected language.",
+    theme="gradio/soft"
+)
+
+if __name__ == "__main__":
+    iface.launch(share=True)
+
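As a quick sanity check of the committed script, the translation function can be exercised directly, without the Gradio UI. A minimal sketch, assuming translation.py is importable from the working directory and that the Qwen2-1.5B-Instruct weights can be downloaded on first use (the snippet and its sample strings are illustrative, not part of the commit):

# smoke_test.py (hypothetical) -- importing translation loads the model once,
# which is slow on the first run while weights are fetched from the Hub.
from translation import process_input

text, lang = process_input("Good morning!", "Translate to Japanese")
print(lang, text)  # expected: "ja" followed by a Japanese translation

text, lang = process_input("おはようございます", "Translate to English")
print(lang, text)  # expected: "en" followed by an English translation

Because iface.launch() sits behind the __main__ guard, importing the module builds the interface but does not start a server, so a check like this stays headless.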