Update app.py
app.py CHANGED

@@ -2,20 +2,18 @@ import os
 import shutil
 from huggingface_hub import snapshot_download
 import gradio as gr
-os.chdir(os.path.dirname(os.path.abspath(__file__)))
-from scripts.inference import inference_process
 import argparse
 import uuid
 
-
+# Ensure the current working directory is the script's directory
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+from scripts.inference import inference_process
 
-
-
+# Download the model to the local directory
+hallo_dir = snapshot_download(repo_id="fudan-generative-ai/hallo", local_dir="pretrained_models")
 
 def run_inference(source_image, driving_audio, progress=gr.Progress(track_tqdm=True)):
-    if is_shared_ui:
-        raise gr.Error("This Space only works in duplicated instances")
-
     unique_id = uuid.uuid4()
 
     args = argparse.Namespace(
@@ -33,7 +31,6 @@ def run_inference(source_image, driving_audio, progress=gr.Progress(track_tqdm=T
     inference_process(args)
     return f'output-{unique_id}.mp4'
 
-
 css = '''
 div#warning-ready {
     background-color: #ecfdf5;
@@ -72,23 +69,6 @@ div#warning-duplicate .actions a {
 '''
 
 with gr.Blocks(css=css) as demo:
-    if is_shared_ui:
-        top_description = gr.HTML(f'''
-            <div class="gr-prose">
-            <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-            Attention: this Space need to be duplicated to work</h2>
-            <p class="main-message custom-color">
-                To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU.<br />
-                An L4 costs <strong>US$0.80/h</strong>
-            </p>
-            <p class="actions custom-color">
-                <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
-                    <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
-                </a>
-                to start generate your talking head
-            </p>
-            </div>
-        ''', elem_id="warning-duplicate")
     gr.Markdown("# Demo for Hallo: Hierarchical Audio-Driven Visual Synthesis for Portrait Image Animation")
     gr.Markdown("Generate talking head avatars driven from audio. **5 seconds of audio takes >10 minutes to generate on an L4** - duplicate the space for private use or try for free on Google Colab")
     with gr.Row():
@@ -105,4 +85,4 @@ with gr.Blocks(css=css) as demo:
         outputs=output_video
     )
 
-demo.launch()
+demo.launch()
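
For reference, a minimal standalone sketch of the download-at-startup pattern this commit introduces: the repo_id and local_dir values mirror the diff above, while the final print is illustrative only and not part of app.py.

# Sketch only: mirrors the module-level snapshot_download call added above.
from huggingface_hub import snapshot_download

# Fetches the model repository into pretrained_models/ and returns the local
# path; files that are already present are typically not downloaded again.
hallo_dir = snapshot_download(
    repo_id="fudan-generative-ai/hallo",
    local_dir="pretrained_models",
)
print(hallo_dir)  # local directory containing the downloaded weights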