ziwaixian009 committed
Commit 8f11c9f · verified · 1 Parent(s): 4859176

Update app.py

Files changed (1)
  1. app.py +25 -5
app.py CHANGED
@@ -7,17 +7,34 @@
  # demo.launch()
  import requests
  from PIL import Image
- from transformers import AutoModelForCausalLM, AutoProcessor
+ from transformers import AutoModelForCausalLM, AutoProcessor, MarianMTModel, MarianTokenizer
  import torch
  import gradio as gr

  # Set the device
  device = "cuda" if torch.cuda.is_available() else "cpu"

- # Load the model and processor
+ # Load the Florence-2 model and processor
  model = AutoModelForCausalLM.from_pretrained("MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True).to(device)
  processor = AutoProcessor.from_pretrained("MiaoshouAI/Florence-2-base-PromptGen-v1.5", trust_remote_code=True)

+ # Load the Helsinki-NLP translation model (English to Chinese)
+ translation_model_name = "Helsinki-NLP/opus-mt-en-zh"
+ translation_tokenizer = MarianTokenizer.from_pretrained(translation_model_name)
+ translation_model = MarianMTModel.from_pretrained(translation_model_name).to(device)
+
+ # Translation function
+ def translate_to_chinese(text):
+     try:
+         # Tokenize and translate
+         tokenized_text = translation_tokenizer(text, return_tensors="pt", max_length=512, truncation=True).to(device)
+         translated_tokens = translation_model.generate(**tokenized_text)
+         translated_text = translation_tokenizer.decode(translated_tokens[0], skip_special_tokens=True)
+         return translated_text
+     except Exception as e:
+         return f"Translation error: {str(e)}"
+
+ # Generate a caption and translate it
  def generate_caption(image_url):
      try:
          # Download and open the image
@@ -40,7 +57,10 @@ def generate_caption(image_url):
          # Parse the generated text
          parsed_answer = processor.post_process_generation(generated_text, task=prompt, image_size=(image.width, image.height))

-         return parsed_answer
+         # Translate into Chinese
+         translated_answer = translate_to_chinese(parsed_answer)
+
+         return translated_answer
      except Exception as e:
          return f"Error: {str(e)}"

@@ -53,9 +73,9 @@ def gradio_interface(image_url):
  iface = gr.Interface(
      fn=gradio_interface, # Handler function
      inputs=gr.Textbox(label="Image URL", placeholder="Enter the URL of the image..."), # Input component
-     outputs=gr.Textbox(label="Generated Caption"), # Output component
+     outputs=gr.Textbox(label="Generated Caption (Translated to Chinese)"), # Output component
      title="Florence-2 Prompt Generation", # Title
-     description="Generate detailed captions for images using Florence-2 model.", # Description
+     description="Generate detailed captions for images using Florence-2 model and translate them to Chinese.", # Description
      examples=[
          ["https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg?download=true"]
      ] # Examples
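
A reading note on the new translation step: Florence-2's post_process_generation typically returns a dict keyed by the task token (for example {'<MORE_DETAILED_CAPTION>': '...'}), while MarianTokenizer expects a plain string, so translate_to_chinese(parsed_answer) may fail or translate the dict's string form rather than the caption itself. The sketch below shows one way to pull the caption string out first; it is not part of this commit, and the task token "<MORE_DETAILED_CAPTION>" is an assumption, since the actual prompt lives in the portion of generate_caption that this diff does not show.

# Sketch only, not part of this commit. Assumes translate_to_chinese() from
# app.py is in scope; "<MORE_DETAILED_CAPTION>" is a guess at the task prompt
# used in the elided body of generate_caption().
def caption_text(parsed_answer, task="<MORE_DETAILED_CAPTION>"):
    # post_process_generation usually keys its result by the task token;
    # fall back to the first value if the expected key is absent.
    if isinstance(parsed_answer, dict):
        return parsed_answer.get(task) or next(iter(parsed_answer.values()), "")
    return str(parsed_answer)

# Inside generate_caption(), the translation call would then read:
#     translated_answer = translate_to_chinese(caption_text(parsed_answer))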