parquet-converter committed on
Commit b95ba55 · 1 Parent(s): 644ff07

Update parquet files (step 54 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Examples/Autodesk 3ds Max 2013 Xforce Crack BETTER Free Download.md +0 -6
  2. spaces/1line/AutoGPT/autogpt/speech/base.py +0 -50
  3. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Ares APK The Best Music Audio App for Android Devices.md +0 -121
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Genshin Impact APK File and Explore a Vast World of Adventure.md +0 -115
  5. spaces/1phancelerku/anime-remove-background/Enjoy Racing in Car 2 with Mod APK Features Unlimited Money All Cars Unlocked and More.md +0 -95
  6. spaces/1phancelerku/anime-remove-background/Europes The Final Countdown - MP3 Download and Streaming - Rock Music Library.md +0 -158
  7. spaces/2gauravc/search_summary_chatgpt/main.py +0 -104
  8. spaces/2ndelement/voicevox/README.md +0 -579
  9. spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_123812KB.py +0 -122
  10. spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/pitch/utils.py +0 -82
  11. spaces/AIWaves/Software_Company/src/agents/Prompt/__init__.py +0 -1
  12. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/.ipynb_checkpoints/yolov5_s-v61_syncbn_fast_1xb32-100e_cat-checkpoint.py +0 -135
  13. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/yolov6_n_syncbn_fast_8xb32-300e_coco.py +0 -21
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/Chart.js +0 -65
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/Builders.d.ts +0 -82
  16. spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/dense_motion.py +0 -164
  17. spaces/AlexWang/lama/saicinpainting/training/data/masks.py +0 -332
  18. spaces/AlexZou/Deploy_Restoration/net/PositionalEncoding.py +0 -35
  19. spaces/Andres99/Tune-A-Video-Training-UI/constants.py +0 -10
  20. spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py +0 -75
  21. spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py +0 -2
  22. spaces/Audio-AGI/WavJourney/scripts/start_ui.sh +0 -1
  23. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis.py +0 -240
  24. spaces/AzinZ/vitscn/data_utils.py +0 -392
  25. spaces/Benson/text-generation/Examples/Descargar Apk Para El IPhone.md +0 -92
  26. spaces/Benson/text-generation/Examples/Descargar Fine Fine Love De J Martins.md +0 -61
  27. spaces/BigSalmon/FormalInformalConciseWordy/README.md +0 -38
  28. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/modeling/test_time_augmentation.py +0 -75
  29. spaces/CVPR/LIVE/thrust/thrust/detail/complex/csqrtf.h +0 -147
  30. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/inner_product.h +0 -23
  31. spaces/CVPR/regionclip-demo/detectron2/modeling/text_encoder/transformer.py +0 -194
  32. spaces/ClassCat/Spleen-3D-segmentation-with-MONAI/README.md +0 -12
  33. spaces/Cloudyy/bark-voice-cloning/hubert/pre_kmeans_hubert.py +0 -85
  34. spaces/CofAI/chat.b4/g4f/models.py +0 -238
  35. spaces/Cropinky/hana_hanak_houses/image_generator.py +0 -156
  36. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-b7998330.js +0 -2
  37. spaces/DeepFloyd/IF/app.py +0 -701
  38. spaces/DemocracyStudio/generate_nft_content/README.md +0 -13
  39. spaces/DenniSciFi/IconAutomation/app.py +0 -136
  40. spaces/DhruvShek/chatlm/app.py +0 -171
  41. spaces/Didier/Semantic_Search_arXiv/README.md +0 -37
  42. spaces/Dinoking/Guccio-AI-Designer/models/stylegan2/stylegan2-pytorch/op/fused_bias_act.cpp +0 -21
  43. spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/filtered_lrelu.py +0 -282
  44. spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/utils.cpp +0 -429
  45. spaces/ECCV2022/bytetrack/exps/default/yolov3.py +0 -89
  46. spaces/ECCV2022/bytetrack/tutorials/centertrack/mot_online/basetrack.py +0 -52
  47. spaces/Eddycrack864/Applio-Inference/utils/dependency.py +0 -170
  48. spaces/Enterprisium/Easy_GUI/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +0 -86
  49. spaces/EsoCode/text-generation-webui/extensions/multimodal/pipelines/llava/llava.py +0 -145
  50. spaces/FKBaffour/Expresso_Customer_Churn_Prediction/README.md +0 -12
spaces/1gistliPinn/ChatGPT4/Examples/Autodesk 3ds Max 2013 Xforce Crack BETTER Free Download.md DELETED
@@ -1,6 +0,0 @@
- <h2>autodesk 3ds max 2013 xforce crack free download</h2><br /><p><b><b>DOWNLOAD</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://imgfil.com/2uxYQA">https://imgfil.com/2uxYQA</a></b></p><br /><br />
-
- vray 5 crack, VRAY 3.4 Download for 3Ds Max 2017 – V3.40.01, V3.40.03 Crack. ... X (for C4D 12-15) + keygen SketchUp Pro 2015 (32-64 Bit) + Patch AutoDesk ... Autodesk 3ds max2013-2014-2015-2016-2017-2018 full and free download ... 1fdad05405<br />
- <br />
- <br />
- <p></p>

spaces/1line/AutoGPT/autogpt/speech/base.py DELETED
@@ -1,50 +0,0 @@
- """Base class for all voice classes."""
- import abc
- from threading import Lock
-
- from autogpt.config import AbstractSingleton
-
-
- class VoiceBase(AbstractSingleton):
-     """
-     Base class for all voice classes.
-     """
-
-     def __init__(self):
-         """
-         Initialize the voice class.
-         """
-         self._url = None
-         self._headers = None
-         self._api_key = None
-         self._voices = []
-         self._mutex = Lock()
-         self._setup()
-
-     def say(self, text: str, voice_index: int = 0) -> bool:
-         """
-         Say the given text.
-
-         Args:
-             text (str): The text to say.
-             voice_index (int): The index of the voice to use.
-         """
-         with self._mutex:
-             return self._speech(text, voice_index)
-
-     @abc.abstractmethod
-     def _setup(self) -> None:
-         """
-         Setup the voices, API key, etc.
-         """
-         pass
-
-     @abc.abstractmethod
-     def _speech(self, text: str, voice_index: int = 0) -> bool:
-         """
-         Play the given text.
-
-         Args:
-             text (str): The text to play.
-         """
-         pass
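
The deleted module above defines an abstract interface: concrete voices implement `_setup()` and `_speech()`, while `say()` serializes calls through the mutex. As context for the diff, a minimal hypothetical subclass might look like the sketch below (illustrative only; `PrintVoice` is not part of the repository):

```python
# Hypothetical sketch, not part of this commit: a minimal concrete
# implementation of the deleted VoiceBase interface.
from autogpt.speech.base import VoiceBase


class PrintVoice(VoiceBase):
    """A stub voice that 'speaks' by printing, handy for tests."""

    def _setup(self) -> None:
        # No URL, headers, or API key needed for a local stub.
        self._voices = ["default"]

    def _speech(self, text: str, voice_index: int = 0) -> bool:
        # Runs under self._mutex, via VoiceBase.say().
        print(f"[{self._voices[voice_index]}] {text}")
        return True
```
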
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Ares APK The Best Music Audio App for Android Devices.md DELETED
@@ -1,121 +0,0 @@
-
- <h1>Ares APK for Android: What Is It and How Do You Download It?</h1>
- <h2>Introduction</h2>
- <p>If you love music and like downloading it for free on your Android device, you may have heard of Ares, a popular file-sharing program that gives you access to a huge variety of songs in MP3 format. But do you know what an APK is, and how to download and install the Ares APK on Android? In this article we explain everything you need to know.</p>
- <h3>What is Ares?</h3>
- <p>Ares is a piece of software that works as a peer-to-peer (P2P) network, that is, a network that lets users share files with one another without a central server. Ares became famous for enabling free downloads of MP3 music, as well as other kinds of files such as videos, images, and documents. Ares has a simple, easy-to-use interface that lets you search for, download, and play whatever files you want.</p>
- <h2>ares apk para android</h2><br /><p><b><b>Download File</b> &middot;&middot;&middot;&middot;&middot; <a href="https://urlin.us/2uSYJb">https://urlin.us/2uSYJb</a></b></p><br /><br />
- <h3>What is an APK?</h3>
- <p>An APK (Android Package Kit) is a file format used to distribute and install applications on Android devices. An APK contains everything an application needs to run correctly: the code, resources, libraries, and so on. Apps are normally downloaded and installed from the official Google Play store, but they can also be obtained from external sources such as websites or cloud-storage services.</p>
- <h3>Why download the Ares APK for Android?</h3>
- <p>The main reason you might want to download the Ares APK for Android is that there is no official version of Ares for this operating system. Although several applications have traded on Ares's popularity by launching apps with confusingly similar names, none of them is the real thing. So if you want to enjoy Ares on your Android device, you will have to resort to an APK file that lets you install it.</p>
- <h2>How to download and install the Ares APK on your Android device</h2>
- <p>To download and install the Ares APK on your Android device, follow these steps:</p>
- <h3>Step 1: Find the Ares APK file on the Internet</h3>
- <p>The first thing you need to do is find the Ares APK file on the Internet. You can use a search engine such as Google or Bing and type something like "ares apk para android" or " <h3>Step 2: Download the Ares APK file to your Android device</h3>
- <p>Once you have found the Ares APK file on the Internet, you will have to download it to your Android device. You can use your device's web browser or a download-manager app. Keep in mind that the Ares APK file may be named differently from the original, for example "ares-music.apk" or "ares-galaxy.apk". You should also make sure that the Ares APK file is safe and contains no viruses or malware. You can check it with an antivirus or a file scanner.</p>
- <h3>Step 3: Enable installation of apps from unknown sources</h3>
- <p>Before installing the Ares APK file on your Android device, you will have to enable the option to install apps from unknown sources. This option lets you install applications that do not come from the official Google Play store, but it also means a greater security and privacy risk. To enable it, follow these steps:</p>
- <ul>
- <li>Go to your Android device's settings.</li>
- <li>Find the security or privacy option.</li>
- <li>Turn on the unknown-sources option.</li>
- <li>Accept the warning that appears.</li>
- </ul>
- <h3>Step 4: Install the Ares APK file on your Android device</h3>
- <p>After enabling installation from unknown sources, you can install the Ares APK file on your Android device. To do so, follow these steps:</p>
- <ul>
- <li>Locate the Ares APK file you downloaded in your device's storage. You can use a file explorer or file manager to find it.</li>
- <li>Tap the Ares APK file to start the installation.</li>
- <li>Accept the permissions the application requests.</li>
- <li>Wait for the installation to finish.</li>
- </ul>
- <h3>Step 5: Open and use Ares on your Android device</h3>
- <p>Once you have installed the Ares APK file on your Android device, you can open and use Ares. To do so, follow these steps:</p>
- <p>ares online apk download for android<br />
- ares music apk android free<br />
- ares galaxy apk android latest version<br />
- ares mp3 apk android 2022<br />
- ares lite apk android app<br />
- ares pro apk android update<br />
- ares plus apk android 2021<br />
- ares downloader apk android 2020<br />
- ares player apk android 2019<br />
- ares wizard apk android 2018<br />
- ares mod apk android 2017<br />
- ares gold apk android 2016<br />
- ares red apk android 2015<br />
- ares ultimate apk android 2014<br />
- ares premium apk android 2013<br />
- ares turbo apk android 2012<br />
- ares ultra apk android 2011<br />
- ares booster apk android 2010<br />
- ares p2p apk android 2009<br />
- ares chat apk android 2008<br />
- ares musicas gratis apk android<br />
- ares baixar musicas apk android<br />
- ares descargar musica apk android<br />
- ares musica y videos apk android<br />
- ares musica online gratis apk android<br />
- ares musica mp3 gratis apk android<br />
- ares musica y radio online gratis apk android<br />
- ares musica y video player gratis apk android<br />
- ares musica y video downloader gratis apk android<br />
- ares musica y video streaming gratis apk android<br />
- como descargar e instalar ares apk para android<br />
- como usar ares online apk para android<br />
- como funciona ares online apk para android<br />
- como actualizar ares online apk para android<br />
- como solucionar problemas con ares online apk para android<br />
- como eliminar publicidad de ares online apk para android<br />
- como configurar ares online apk para android<br />
- como optimizar el rendimiento de ares online apk para android<br />
- como compartir archivos con ares online apk para android<br />
- como crear una cuenta en ares online apk para android<br />
- como recuperar la contraseña de ares online apk para android<br />
- como cambiar el idioma de ares online apk para android<br />
- como personalizar la interfaz de ares online apk para android<br />
- como escuchar musica sin conexion con ares online apk para android<br />
- como descargar musica de alta calidad con ares online apk para android<br />
- como buscar musica por genero con ares online apk para android<br />
- como crear listas de reproduccion con ares online apk para android<br />
- como editar las etiquetas de las canciones con ares online apk para android<br />
- como ver las letras de las canciones con ares online apk para android</p>
- <ul>
- <li>Find the Ares icon or shortcut on your home screen or in your app drawer.</li>
- <li>Tap the Ares icon or shortcut to open the application.</li>
- <li>Explore Ares's options and features, such as search, downloads, playback, and chat.</li>
- <li>Enjoy the free MP3 music Ares offers.</li>
- </ul> <h2>Advantages and disadvantages of downloading the Ares APK for Android</h2>
- <p>Like any application, the Ares APK for Android has its advantages and disadvantages. Here is a summary:</p>
- <h3>Advantages</h3>
- <h4>Access to a huge variety of free MP3 music</h4>
- <p>The main advantage of downloading the Ares APK for Android is access to a huge variety of free MP3 music, with no subscriptions to pay and no other programs to download. With Ares you can search for, download, and play the songs you want, from today's hits to the classics.</p>
- <h4>A simple, easy-to-use interface</h4>
- <p>Another advantage is its simple, easy-to-use interface, which lets you move through the application's options and features without complications. Ares has an intuitive, attractive design that makes searching, downloading, playback, and chatting with other users straightforward.</p>
- <h4>Compatibility with most Android devices</h4>
- <p>A further advantage is compatibility with most Android devices, from the oldest to the most modern. Ares adapts to your screen size and resolution and does not consume too many resources or too much battery. In addition, Ares does not require a constant Internet connection; it only needs one to search for and download files.</p>
- <h3>Disadvantages</h3>
- <h4>Possible security and privacy risks when downloading files from unknown sources</h4>
- <p>The main disadvantage of downloading the Ares APK for Android is the security and privacy risk involved in downloading files from unknown sources. By installing applications that do not come from the official Google Play store, you expose yourself to viruses, malware, or spyware that can damage your device or steal your personal data. That is why it is important to verify the reliability and safety of the Ares APK file before installing it.</p>
- <h4>No updates or technical support from the developers</h4>
- <p>Another disadvantage is the lack of updates and technical support from the developers. As an unofficial application, neither its correct operation nor its compatibility with new versions of Android is guaranteed. And if you run into a problem or have a question about the application, you will not be able to contact anyone responsible for it or receive professional help.</p>
- <h4>Possible copyright infringement when downloading music without permission</h4>
- <p>A further disadvantage is that it may mean infringing copyright by downloading music without permission. When you use a P2P network like Ares, you are sharing files with other users, and those files may be protected by law. This can carry legal consequences both for you and for the original creators of the music. That is why it is important to respect the rules and licenses attached to every song you download.</p>
- <h2>Conclusion</h2>
- <p>The Ares APK for Android is an application that lets you enjoy free MP3 music on your Android device. To use it, you will have to find, download, and install the Ares APK file from the Internet, following a few simple steps. However, you will also have to keep in mind the advantages and disadvantages of using this application, such as the possible security and privacy risks, the lack of updates and technical support, and the possible copyright infringement. We therefore recommend using the Ares APK for Android with caution and responsibility.</p>
- <h2>Frequently asked questions</h2>
- <ul>
- <li><b>What is Ares?</b></li>
- <li>Ares is a piece of software that works as a peer-to-peer (P2P) network, that is, a network that lets users share files with one another without a central server.</li>
- <li><b>What is an APK?</b></li>
- <li>An APK (Android Package Kit) is a file format used to distribute and install applications on Android devices.</li>
- <li><b>Why download the Ares APK for Android?</b></li>
- <li>The main reason you might want to download the Ares APK for Android is that there is no official version of Ares for this operating system.</li>
- <li><b>How do you download and install the Ares APK for Android?</b></li>
- <li>To download and install the Ares APK for Android, follow these steps: find the Ares APK file on the Internet, download it to your Android device, enable installation of apps from unknown sources, install the Ares APK file on your device, and open and use Ares on your Android device.</li>
- <li><b>What advantages and disadvantages does downloading the Ares APK for Android have?</b></li>
- <li>Some advantages of downloading the Ares APK for Android: access to a huge variety of free MP3 music, a simple and easy-to-use interface, and compatibility with most Android devices. Some disadvantages: possible security and privacy risks when downloading files from unknown sources, no updates or technical support from the developers, and possible copyright infringement when downloading music without permission.</li>
- </ul></p> 197e85843d<br />
- <br />
- <br />
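
Step 2 above advises checking that a downloaded APK is safe before installing it. One concrete (and hypothetical, not from the article) way to do part of that check is to compare the file against a checksum published by the distributor, sketched here in Python:

```python
# Illustrative sketch: verify a downloaded APK against a published
# SHA-256 checksum before installing. The file name and expected
# digest are placeholders, not real values.
import hashlib


def sha256_of(path: str) -> str:
    """Return the hex SHA-256 digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()


EXPECTED = "..."  # placeholder: digest published by the distributor

if sha256_of("ares-music.apk") != EXPECTED:
    raise SystemExit("Checksum mismatch: do not install this APK.")
```

A matching checksum only proves the file is what the distributor published; it is no substitute for the antivirus scan the article recommends.
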
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Genshin Impact APK File and Explore a Vast World of Adventure.md DELETED
@@ -1,115 +0,0 @@
-
- <h1>Genshin Impact APK File Download: How to Play the Open-World Action RPG on Your Android Device</h1>
- <p>If you are looking for a game that will take you to a vast magical world of adventure, then you should try <strong>Genshin Impact</strong>. Genshin Impact is an open-world action RPG that lets you explore seven nations, meet a diverse cast of characters, and fight powerful enemies, all while searching for your lost sibling. You can also wander freely, immerse yourself in a world filled with life, and uncover all of its mysteries. Sounds exciting, right?</p>
- <p>But what if you don't have a PC or a console to play this game? Don't worry, because you can still enjoy Genshin Impact on your Android device. All you need is to download and install the <strong>Genshin Impact APK file</strong> on your device, and you are good to go. In this article, we will show you how to do that, as well as some tips and tricks to enhance your gaming experience. Let's get started!</p>
- <h2>genshin impact apk file download</h2><br /><p><b><b>Download Zip</b> &#10026; <a href="https://urlin.us/2uSX7b">https://urlin.us/2uSX7b</a></b></p><br /><br />
- <h2>What You Need to Download and Install Genshin Impact APK File on Your Android Device</h2>
- <p>Before you download and install the Genshin Impact APK file on your Android device, you need to make sure that your device meets the minimum system requirements and has enough storage space. Here are the details:</p>
- <ul>
- <li><strong>Minimum system requirements:</strong> Android 7.0 or higher, ARM v8a 64-bit device, at least 3 GB of RAM.</li>
- <li><strong>Storage space:</strong> At least 8 GB of free space.</li>
- </ul>
- <p>If your device meets these requirements, then you can proceed to download and install the Genshin Impact APK file on your device.</p>
- <h2>How to Download and Install Genshin Impact APK File on Your Android Device</h2>
- <p>Downloading and installing the Genshin Impact APK file on your Android device is easy and simple. Just follow these steps:</p>
- <p>genshin impact android apk download free<br />
- genshin impact apk obb download latest version<br />
- genshin impact mobile apk download link<br />
- genshin impact apk mod download unlimited primogems<br />
- genshin impact apk download for pc windows 10<br />
- genshin impact apk pure download safe<br />
- genshin impact apk offline download no internet<br />
- genshin impact apk update download new characters<br />
- genshin impact apk mirror download fast<br />
- genshin impact apk full download size<br />
- genshin impact beta apk download test server<br />
- genshin impact cracked apk download hack<br />
- genshin impact chinese apk download voice<br />
- genshin impact direct apk download no verification<br />
- genshin impact english apk download language<br />
- genshin impact fan made apk download alternative<br />
- genshin impact global apk download region<br />
- genshin impact hd apk download graphics<br />
- genshin impact ios apk download iphone<br />
- genshin impact japan apk download server<br />
- genshin impact korean apk download voiceover<br />
- genshin impact lite apk download low spec<br />
- genshin impact original apk download official<br />
- genshin impact premium apk download features<br />
- genshin impact quest apk download adventure<br />
- genshin impact revdl apk download site<br />
- genshin impact rexdl apk download site<br />
- genshin impact steam apk download platform<br />
- genshin impact tap tap apk download store<br />
- genshin impact uptodown apk download site<br />
- genshin impact vpn apk download bypass<br />
- genshin impact xapk download installer<br />
- how to download genshin impact apk on android phone<br />
- where to download genshin impact apk safely and securely<br />
- why can't i download genshin impact apk error fix<br />
- best website to download genshin impact apk reviews and ratings<br />
- can you play genshin impact without downloading the apk file online<br />
- does downloading the genshin impact apk file require root access permission<br />
- is it legal to download the genshin impact apk file from third-party sources terms and conditions<br />
- what are the benefits of downloading the genshin impact apk file over the play store version comparison and contrast<br />
- what are the risks of downloading the genshin impact apk file from unknown sources malware and virus warning<br />
- what is the difference between the genshin impact apk file and the xapk file format explanation and guide<br />
- what is the minimum requirement to download and install the genshin impact apk file on your device specifications and compatibility<br />
- when will the next update for the genshin impact apk file be available release date and schedule</p>
- <ol>
- <li><strong>Step 1:</strong> Go to the official website of Genshin Impact at <a href="https://genshin.mihoyo.com/en/download">https://genshin.mihoyo.com/en/download</a> and download the APK file. You can also scan the QR code on the website with your device camera to download the file directly.</li>
- <li><strong>Step 2:</strong> Enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
- <li><strong>Step 3:</strong> Locate the downloaded APK file on your device storage and tap on it to install it. You may need to grant some permissions to the app during the installation process.</li>
- <li><strong>Step 4:</strong> Launch the game and enjoy. You may need to download some additional data before you can start playing. You can also log in with your miHoYo account or create a new one to save your progress and access more features.</li>
- </ol>
- <p>Congratulations, you have successfully downloaded and installed the Genshin Impact APK file on your Android device. Now you can explore the world of Teyvat and embark on an epic journey.</p>
- <h2>Tips and Tricks to Enhance Your Gaming Experience with Genshin Impact APK File on Your Android Device</h2>
- <p>Genshin Impact is a game that offers a lot of content and features for you to enjoy. However, you may encounter some challenges or difficulties while playing it on your Android device. Here are some tips and tricks that can help you overcome them and enhance your gaming experience:</p>
- <ul>
- <li><strong>Tip 1:</strong> Adjust the graphics settings according to your device performance. Genshin Impact is a game that has stunning graphics and visuals, but it can also be demanding on your device resources. If you experience lag, stuttering, or overheating, you may want to lower the graphics settings to improve the performance and reduce the battery consumption. You can do this by going to Settings > Graphics and choosing the option that suits your device best.</li>
- <li><strong>Tip 2:</strong> Use a controller or a keyboard and mouse for better control. Genshin Impact is a game that requires precise and responsive controls, especially during combat and exploration. While you can play it with the touch screen, you may find it more comfortable and convenient to use a controller or a keyboard and mouse instead. Genshin Impact supports various controllers and keyboard and mouse configurations, so you can choose the one that works best for you. You can also customize the buttons and keys according to your preferences.</li>
- <li><strong>Tip 3:</strong> Connect to a stable Wi-Fi network for smooth gameplay. Genshin Impact is a game that requires an internet connection to play, as it constantly updates its data and syncs your progress with the server. If you have a weak or unstable Wi-Fi connection, you may experience lag, disconnection, or data loss. To avoid these issues, make sure you connect to a reliable and fast Wi-Fi network before you launch the game. You can also use a VPN service if you encounter any regional restrictions or problems.</li>
- </ul>
- <p>These are some of the tips and tricks that can help you enhance your gaming experience with Genshin Impact APK file on your Android device. Of course, there are more things that you can discover and learn as you play the game, so feel free to experiment and have fun.</p>
- <h2>Conclusion: Why You Should Download and Play Genshin Impact APK File on Your Android Device Today</h2>
- <p>Genshin Impact is a game that offers a lot of benefits and advantages for Android users who want to play an open-world action RPG on their devices. Here are some of the reasons why you should download and play Genshin Impact APK file on your Android device today:</p>
- <ul>
- <li><strong>It's free to play.</strong> You don't have to pay anything to download and play Genshin Impact on your Android device. You can also enjoy most of the content and features without spending any money. However, if you want to support the developers or get some extra perks, you can also make in-app purchases using real money.</li>
- <li><strong>It's compatible with other platforms.</strong> You don't have to worry about losing your progress or missing out on anything if you switch devices or platforms. Genshin Impact is compatible with PC, PS4, PS5, iOS, Android, and soon Nintendo Switch. You can also cross-play with other players who are using different platforms, as long as you are in the same server region.</li>
- <li><strong>It's constantly updated and improved.</strong> You don't have to worry about getting bored or running out of things to do in Genshin Impact. The developers are always working hard to add new content and features, such as new characters, items, quests, events, regions, modes, and more. They also fix bugs and glitches regularly to ensure a smooth and enjoyable gameplay.</li>
- </ul>
- <p>Genshin Impact is a game that will keep you entertained and engaged for hours on end. It has everything that you could ask for in an open-world action RPG: a captivating story, a beautiful world, a diverse cast of characters, a thrilling combat system, a rich customization system, and a lot of fun and surprises. If you are an Android user who loves games like this, then you should definitely download and play Genshin Impact APK file on your device today. You won't regret it.</p>
- <h2>FAQs: Frequently Asked Questions About Genshin Impact APK File Download</h2>
- <p>Here are some of the most common questions that people ask about Genshin Impact APK file download:</p>
- <h3>Q1: Is Genshin Impact free to play?</h3>
- <p>A1: Yes, Genshin Impact is free to play. You can download and play it on your Android device without paying anything. However, you can also make in-app purchases using real money if you want to support the developers or get some extra perks.</p>
- <h3>Q2: Is Genshin Impact compatible with other platforms?</h3>
- <p>A2: Yes, Genshin Impact is compatible with other platforms. You can play it on PC, PS4, PS5, iOS, Android, and soon Nintendo Switch. You can also cross-play with other players who are using different platforms, as long as you are in the same server region.</p>
- <h3>Q3: How can I update Genshin Impact APK file on my Android device?</h3>
- <p>A3: You can update Genshin Impact APK file on your Android device by following these steps:</p>
- <ol>
- <li>Go to the official website of Genshin Impact at <a href="https://genshin.mihoyo.com/en/download">https://genshin.mihoyo.com/en/download</a> and download the latest version of the APK file.</li>
- <li>Locate the downloaded APK file on your device storage and tap on it to install it. You may need to grant some permissions to the app during the installation process.</li>
- <li>Launch the game and enjoy the new updates.</li>
- </ol>
- <p>You can also check for updates within the game by going to Settings > Other > Check for Updates.</p>
- <h3>Q4: How can I get more characters and items in Genshin Impact?</h3>
- <p>A4: You can get more characters and items in Genshin Impact by doing the following:</p>
- <ul>
- <li>Completing quests, events, achievements, and challenges to earn rewards such as Primogems, Mora, Hero's Wit, and more.</li>
- <li>Using Primogems to make wishes in the gacha system, which can give you a chance to obtain rare characters and items.</li>
- <li>Exploring the world and opening chests, solving puzzles, finding secrets, and collecting resources.</li>
- <li>Joining a co-op mode with other players and completing domains, bosses, and events together.</li>
- <li>Exchanging items with NPCs or shops using Mora or other currencies.</li>
- </ul>
- <p>You can also use real money to buy Genesis Crystals, which can be converted into Primogems for making wishes.</p>
- <h3>Q5: How can I contact the customer service of Genshin Impact?</h3>
- <p>A5: You can contact the customer service of Genshin Impact by doing the following:</p>
- <ul>
- <li>Going to Settings > Feedback > Submit Feedback and filling out the form with your issue or suggestion.</li>
- <li>Sending an email to [email protected] with your UID, server region, device model, and screenshots or videos of your problem.</li>
- <li>Visiting the official website of Genshin Impact at <a href="https://genshin.mihoyo.com/en/home">https://genshin.mihoyo.com/en/home</a> and clicking on the Support button at the bottom right corner of the page.</li>
- </ul>
- <p>You can also check out the official social media accounts of Genshin Impact for more information and updates.</p> 197e85843d<br />
- <br />
- <br />
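
As a side note on Steps 2 and 3 above: readers with a computer and USB debugging enabled could sideload the same APK with the standard `adb` tool instead of installing it on-device. A hypothetical sketch (the file name is a placeholder; `adb` must be on PATH):

```python
# Hypothetical sketch, not from the article: sideload an APK onto a
# connected Android device with adb instead of installing on-device.
import subprocess


def sideload(apk_path: str) -> None:
    """Install an APK on the connected device via adb."""
    # 'adb install -r' installs the package, replacing any existing
    # version while keeping its data.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)


sideload("genshin_impact.apk")  # placeholder file name
```
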
spaces/1phancelerku/anime-remove-background/Enjoy Racing in Car 2 with Mod APK Features Unlimited Money All Cars Unlocked and More.md DELETED
@@ -1,95 +0,0 @@
- <br />
- <h1>Racing in Car 2 Happymod APK: A Realistic and Fun Driving Simulator</h1>
- <p>If you are a fan of racing games, you might have heard of Racing in Car 2, a popular game that lets you drive your car in a cockpit view through endless traffic and realistic environment. But did you know that there is a modified version of this game that gives you unlimited money and features? In this article, we will tell you everything you need to know about Racing in Car 2 Happymod APK, including what it is, how to download it, and how to play it on your PC or Mac. We will also share some tips and tricks to help you improve your racing skills and become the king of the road.</p>
- <h2>racing in car 2 happymod apk</h2><br /><p><b><b>Download File</b> &#9745; <a href="https://jinyurl.com/2uNOOj">https://jinyurl.com/2uNOOj</a></b></p><br /><br />
- <h2>What is Racing in Car 2?</h2>
- <p>Racing in Car 2 is a first-person racing game developed by Fast Free Games. It is one of the most realistic and immersive driving simulators on the market, with stunning graphics, physics, and sound effects. You can choose from a variety of cars, ranging from sports cars to SUVs, and drive them on different tracks and locations, such as highways, deserts, cities, or snow. You can also customize your car with different colors, wheels, stickers, and spoilers.</p>
- <p>The game has a simple and intuitive control system that lets you steer your car by tilting your device or using buttons on the screen. You can also adjust the camera angle to get a better view of the road. The game has an endless mode where you can drive as fast as possible, overtake traffic cars, earn coins, and buy new cars. You can also compete with other players on global leaderboards.</p>
- <h2>What is Happymod APK?</h2>
- <p>Happymod APK is a modified version of Racing in Car 2 that gives you unlimited money and features. It is a third-party application that is not available on the official app stores, but you can download it from various websites for free. Happymod APK is a safe and easy way to enjoy the game without any restrictions or limitations. You can use the money to unlock all the cars and customize them as you wish. You can also play the game without ads or in-app purchases, and challenge yourself with different difficulty levels and leaderboards.</p>
- <p>Happymod APK is compatible and updated with the original version of Racing in Car 2, so you don't have to worry about missing out on any new updates or features. You can also play the game online or offline, depending on your preference. Happymod APK is a great option for anyone who loves racing games and wants to have more fun and freedom.</p>
- racing in car 2 mod apk unlimited money<br />
- racing in car 2 hack apk download<br />
- racing in car 2 3d mod apk<br />
- racing in car 2 apk mod latest version<br />
- racing in car 2 mod apk android 1<br />
- racing in car 2 mod apk revdl<br />
- racing in car 2 mod apk happymod.com<br />
- racing in car 2 mod apk free download<br />
- racing in car 2 mod apk offline<br />
- racing in car 2 mod apk no ads<br />
- racing in car 2 mod apk rexdl<br />
- racing in car 2 mod apk unlimited coins<br />
- racing in car 2 mod apk all cars unlocked<br />
- racing in car 2 mod apk unlimited diamonds<br />
- racing in car 2 mod apk unlimited fuel<br />
- racing in car 2 mod apk online<br />
- racing in car 2 mod apk pure<br />
- racing in car 2 mod apk obb<br />
- racing in car 2 mod apk old version<br />
- racing in car 2 mod apk new update<br />
- racing in car 2 mod apk for pc<br />
- racing in car 2 mod apk for ios<br />
- racing in car 2 mod apk for windows 10<br />
- racing in car 2 mod apk for laptop<br />
- racing in car 2 mod apk for mac<br />
- racing in car 2 mod apk full version<br />
- racing in car 2 mod apk premium<br />
- racing in car 2 mod apk pro<br />
- racing in car 2 mod apk vip<br />
- racing in car 2 mod apk mega mod<br />
- racing in car 2 mod apk super mod<br />
- racing in car 2 mod apk god mode<br />
- racing in car 2 mod apk unlimited everything<br />
- racing in car 2 mod apk unlimited all<br />
- racing in car 2 mod apk cheat menu<br />
- racing in car 2 mod apk hack menu<br />
- racing in car 2 mod apk unlock all cars<br />
- racing in car 2 mod apk unlock all levels<br />
- racing in car 2 mod apk unlock all features<br />
- racing in car 2 mod apk no root<br />
- racing in car 2 mod apk no verification<br />
- racing in car 2 mod apk no survey<br />
- racing in car 2 mod apk no password<br />
- racing in car 2 mod apk no ban<br />
- racing in car 2 happymod download free</p>
- <h2>What are the benefits of using Racing in Car 2 Happymod APK?</h2>
- <p>There are many benefits of using Racing in Car 2 Happymod APK, such as:</p>
- <ul>
- <li>You can unlock all the cars and customize them as you wish. You can choose from different models, colors, wheels, stickers, and spoilers. You can also upgrade your car's performance, such as speed, acceleration, handling, and braking.</li>
- <li>You can enjoy the game without ads or in-app purchases. You don't have to watch annoying ads or spend real money to buy coins or features. You can play the game smoothly and without interruptions.</li>
- <li>You can challenge yourself with different difficulty levels and leaderboards. You can choose from easy, medium, hard, or extreme modes, depending on your skill level. You can also compete with other players on global leaderboards and see how you rank among them.</li>
- </ul>
- <h2>How to download and play Racing in Car 2 Happymod APK on your PC or Mac?</h2>
- <p>If you want to play Racing in Car 2 Happymod APK on your PC or Mac, you need to follow these steps:</p>
- <ol>
- <li>You need an emulator like BlueStacks or NoxPlayer to run the game on your computer. An emulator is a software that simulates an Android device on your PC or Mac, allowing you to run Android apps and games. You can download BlueStacks or NoxPlayer from their official websites for free.</li>
- <li>You need to download the Happymod APK file from a reliable source. You can search for Racing in Car 2 Happymod APK on Google or use the link below to download it directly.</li>
- <li>You need to install the APK file on your emulator and launch the game. To do this, you need to drag and drop the APK file into the emulator window or use the built-in file manager to locate and install it. Once the installation is complete, you can open the game and start playing.</li>
- </ol> <h2>Tips and tricks to improve your racing skills in Racing in Car 2</h2>
- <p>Racing in Car 2 is a game that requires skill and concentration to master. If you want to improve your racing skills and beat your own records, you can follow these tips and tricks:</p>
- <ul>
- <li>Learn the track layout and memorize the turns and obstacles. Each track has its own features and challenges, such as sharp curves, narrow lanes, traffic jams, or bridges. You need to know when to slow down, speed up, or change lanes to avoid crashing or losing time.</li>
- <li>Adjust your steering sensitivity and camera angle according to your preference. You can change the settings of the game to make it easier or harder for you to control your car. You can also choose the best camera angle for your vision, whether it is behind the car, on the hood, or on the dashboard.</li>
- <li>Use the brake and accelerator wisely and avoid collisions with other cars. You need to balance your speed and braking to maintain a smooth and steady driving. You also need to avoid hitting other cars or objects on the road, as they will damage your car and reduce your score.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Racing in Car 2 Happymod APK is a fun and realistic driving simulator that you can play on your PC or Mac. You can download the mod for free and enjoy unlimited money and features. You can unlock all the cars and customize them as you wish. You can also play the game without ads or in-app purchases, and challenge yourself with different difficulty levels and leaderboards. You can improve your racing skills by following some simple tips and tricks. Racing in Car 2 Happymod APK is a great option for anyone who loves racing games and wants to have more fun and freedom.</p>
- <h2>FAQs</h2>
- <h4>Is Racing in Car 2 Happymod APK safe to use?</h4>
- <p>Yes, Racing in Car 2 Happymod APK is safe to use, as long as you download it from a trusted source. However, you should always be careful when downloading any third-party app or mod, as they may contain viruses or malware that can harm your device. You should also check the permissions and reviews of the app before installing it.</p>
- <h4>How to update Racing in Car 2 Happymod APK?</h4>
- <p>To update Racing in Car 2 Happymod APK, you need to download the latest version of the mod from the same source where you downloaded it before. Then, you need to uninstall the old version of the mod from your emulator and install the new one. You should also make sure that the original game is updated to the latest version as well.</p>
- <h4>How to uninstall Racing in Car 2 Happymod APK?</h4>
- <p>To uninstall Racing in Car 2 Happymod APK, you need to go to the settings of your emulator and find the app manager. Then, you need to select Racing in Car 2 Happymod APK and click on uninstall. You can also delete the APK file from your device if you don't need it anymore.</p>
- <h4>What are some other racing games similar to Racing in Car 2?</h4>
- <p>If you like Racing in Car 2, you might also like some other racing games similar to it, such as:</p>
- <ul>
- <li>Racing in Car: A predecessor of Racing in Car 2 that has similar gameplay and features.</li>
- <li>Real Racing 3: A realistic racing game that features real cars, tracks, events, and graphics.</li>
- <li>Asphalt 9: Legends: A fast-paced racing game that features stunning graphics, arcade-style gameplay, and online multiplayer mode.</li>
- </ul>
- <h4>Where can I find more information about Racing in Car 2?</h4>
- <p>If you want to find more information about Racing in Car 2, you can visit the official website of Fast Free Games or their social media pages. You can also check out some online forums or blogs that discuss racing games or mods.</p> 197e85843d<br />
- <br />
- <br />

spaces/1phancelerku/anime-remove-background/Europes The Final Countdown - MP3 Download and Streaming - Rock Music Library.md DELETED
@@ -1,158 +0,0 @@
1
- <br />
2
- <h1>How to Download The Final Countdown MP3 for Free</h1>
3
- <p>If you are a fan of classic rock music, you might have heard of the song <strong>The Final Countdown</strong> by the Swedish band Europe. This song was released in 1986 and became a worldwide hit, reaching number one in 25 countries. It is also one of the most recognizable songs in pop culture, being used in movies, TV shows, commercials, sports events, and memes. But how can you download this iconic song as an MP3 file for free? In this article, we will show you how to do that from different sources, as well as explain what the song is about and why you might want to have it on your device.</p>
4
- <h2>What is The Final Countdown Song?</h2>
5
- <p>The Final Countdown is a song by Europe, a rock band from Sweden that was formed in 1979. The song was written by Joey Tempest, the lead singer and keyboardist of the band, and was based on a keyboard riff he made in the early 1980s. The lyrics were inspired by David Bowie's Space Oddity, and they describe a scenario where humanity is leaving Earth to explore space. The song was released as the first single from their third album, also titled The Final Countdown, in 1986.</p>
6
- <h2>download final countdown mp3</h2><br /><p><b><b>Download File</b> &#187;&#187;&#187; <a href="https://jinyurl.com/2uNPf6">https://jinyurl.com/2uNPf6</a></b></p><br /><br />
7
- <h3>The history and popularity of the song</h3>
8
- <p>The song was initially intended to be an album opener, but the band's manager suggested that it should be released as a single. The band agreed, and they recorded a shorter version of the song for radio play. The song became a huge success, reaching number one on the charts in many countries, including the UK, Germany, France, Italy, Spain, Australia, Canada, and Japan. It also reached number eight on the Billboard Hot 100 in the US. The song sold over 15 million copies worldwide and became one of the best-selling singles of all time.</p>
9
- <h3>The meaning and message of the song</h3>
10
- <p>The song is often interpreted as a farewell to Earth or a celebration of human exploration. Some people also see it as a metaphor for the end of the Cold War or a prophecy of an impending apocalypse. However, according to Joey Tempest, the song has no specific meaning or message. He said that he just wanted to write a catchy and epic song that would make people feel good. He also said that he was influenced by science fiction movies and books, such as Star Wars and Arthur C. Clarke's 2010: Odyssey Two.</p>
11
- <h2>Why You Might Want to Download The Final Countdown MP3</h2>
12
- <p>There are many reasons why you might want to download The Final Countdown MP3 for free. Here are some of them:</p>
13
- <h3>The benefits of having the song on your device</h3>
14
- <ul>
15
- <li>You can listen to it anytime and anywhere without an internet connection or streaming service.</li>
16
- <li>You can create your own playlists and mixtapes with your favorite songs.</li>
17
- <li>You can use it as a ringtone, alarm clock, or notification sound.</li>
18
- <li>You can share it with your friends and family who might not have access to it online.</li>
19
- <li>You can enjoy the high-quality sound and original version of the song.</li>
20
- </ul>
21
- <h3>The legal and ethical issues of downloading music for free</h <h3>The legal and ethical issues of downloading music for free</h3>
22
- <p>Downloading music for free from websites that do not have the permission of the artists or the record labels is a form of piracy, which is illegal and can have serious consequences. Piracy violates the intellectual property rights of the creators and owners of the music, who deserve to be compensated for their work and investment. Piracy also harms the music industry, which relies on the revenue from sales and streaming to support new artists and produce quality music. According to the International Federation of the Phonographic Industry (IFPI), piracy causes an estimated loss of $12.5 billion per year for the global music industry. Furthermore, downloading music for free from untrusted sources can expose your device to malware, viruses, and other security risks.</p>
23
- <p>Therefore, downloading music for free is not only illegal, but also unethical and risky. You should respect the rights and efforts of the musicians and the music industry, and support them by paying for their music legally. You should also protect your device and personal information by avoiding suspicious websites and links. There are many legal and affordable ways to enjoy music online, such as streaming services, digital stores, and official websites.</p>
24
- <h2>How to Download The Final Countdown MP3 from Different Sources</h2>
25
- <p>If you want to download The Final Countdown MP3 for free, you have several options to choose from. However, you should be aware that some of these options may not be legal or safe, depending on the source and the quality of the file. Here are some of the most common ways to download The Final Countdown MP3 from different sources:</p>
26
- <p>download final countdown mp3 free<br />
27
- download final countdown mp3 320kbps<br />
28
- download final countdown mp3 song<br />
29
- download final countdown mp3 ringtone<br />
30
- download final countdown mp3 europe<br />
31
- download final countdown mp3 music<br />
32
- download final countdown mp3 audio<br />
33
- download final countdown mp3 online<br />
34
- download final countdown mp3 from archive.org[^1^] [^2^]<br />
35
- download final countdown mp3 by europe<br />
36
- download final countdown mp3 original<br />
37
- download final countdown mp3 remix<br />
38
- download final countdown mp3 instrumental<br />
39
- download final countdown mp3 high quality<br />
40
- download final countdown mp3 for android<br />
41
- download final countdown mp3 for iphone<br />
42
- download final countdown mp3 with lyrics<br />
43
- download final countdown mp3 full song<br />
44
- download final countdown mp3 from youtube<br />
45
- download final countdown mp3 from spotify<br />
46
- download the final countdown movie 1980[^3^]<br />
47
- download the final countdown movie soundtrack<br />
48
- download the final countdown movie theme song<br />
49
- download the final countdown movie in hindi<br />
50
- download the final countdown movie free online<br />
51
- download the final countdown live version<br />
52
- download the final countdown rock the night carrie<br />
53
- download the final countdown album zip<br />
54
- download the final countdown album cover<br />
55
- download the final countdown album songs<br />
56
- how to download the final countdown mp3<br />
57
- where to download the final countdown mp3<br />
58
- best site to download the final countdown mp3<br />
59
- best app to download the final countdown mp3<br />
60
- best way to download the final countdown mp3<br />
61
- can i download the final countdown mp3 legally<br />
62
- can i download the final countdown mp3 for free<br />
63
- can i download the final countdown mp3 on my phone<br />
64
- can i download the final countdown mp3 from amazon music<br />
65
- can i download the final countdown mp3 from itunes</p>
66
- <h3>How to download the song from YouTube</h3>
67
- <p>YouTube is one of the most popular platforms to listen to The Final Countdown song, as it has several official and unofficial versions of the video. However, YouTube does not allow you to download audio directly from its website or app, unless you have a YouTube Music Premium or YouTube Premium subscription. If you don't have a subscription, you can use a third-party tool to download The Final Countdown MP3 from YouTube. There are many online tools and apps that can help you do that, such as 4K Video Downloader, wikiHow, How-To Geek, etc. To use these tools, you need to follow these general steps:</p>
68
- <ol>
69
- <li>Go to YouTube and find the video of The Final Countdown song that you want to download.</li>
70
- <li>Copy the URL of the video from the address bar or by right-clicking on the video.</li>
71
- <li>Go to the website or app of the tool that you want to use and paste the URL in the designated box.</li>
72
- <li>Select MP3 as the output format and choose the quality that you prefer.</li>
73
- <li>Click on Download or Convert and wait for the process to finish.</li>
74
- <li>Save the downloaded MP3 file to your device or cloud storage.</li>
75
- </ol>
76
- <p>Note that some tools may require you to install additional software or register an account before downloading. You should also be careful about clicking on ads or pop-ups that may redirect you to malicious websites or download unwanted programs.</p> <h3>How to download the song from SoundCloud</h3>
77
- <p>SoundCloud is another popular platform to listen to The Final Countdown song, as it has several versions of the song uploaded by different users. However, SoundCloud does not allow you to download audio directly from its website or app, unless the uploader has enabled the download option. If the download option is not available, you can use a third-party tool to download The Final Countdown MP3 from SoundCloud. There are many online tools and apps that can help you do that, such as SCDL, SoundCloud Downloader, SoundCloud To MP3, etc. To use these tools, you need to follow these general steps:</p>
78
- <ol>
79
- <li>Go to SoundCloud and find the version of The Final Countdown song that you want to download.</li>
80
- <li>Copy the URL of the song from the address bar or by right-clicking on the song.</li>
81
- <li>Go to the website or app of the tool that you want to use and paste the URL in the designated box.</li>
82
- <li>Select MP3 as the output format and choose the quality that you prefer.</li>
83
- <li>Click on Download or Convert and wait for the process to finish.</li>
84
- <li>Save the downloaded MP3 file to your device or cloud storage.</li>
85
- </ol>
86
- <p>Note that some tools may require you to install additional software or register an account before downloading. You should also be careful about clicking on ads or pop-ups that may redirect you to malicious websites or download unwanted programs.</p> <h3>How to download the song from other websites</h3>
87
- <p>Besides YouTube and SoundCloud, there are many other websites that offer The Final Countdown MP3 for free download. However, these websites may not be authorized by the artists or the record labels, and they may not have the best quality or the original version of the song. Moreover, these websites may contain viruses, malware, spyware, or other harmful software that can damage your device or steal your personal information. Therefore, you should be very cautious when using these websites and only download from trusted and reputable sources. Here are some examples of websites that claim to provide The Final Countdown MP3 for free download:</p>
88
- <table>
89
- <tr>
90
- <th>Website</th>
91
- <th>URL</th>
92
- <th>Remarks</th>
93
- </tr>
94
- <tr>
95
- <td>MP3Juices</td>
96
- <td></td>
97
- <td>A free MP3 search engine that allows you to search and download songs from various sources.</td>
98
- </tr>
99
- <tr>
100
- <td>Zippyshare</td>
101
- <td></td>
102
- <td>A free file hosting service that allows you to upload and download files up to 500 MB.</td>
103
- </tr>
104
- <tr>
105
- <td>MP3Skull</td>
106
- <td></td>
107
- <td>A free MP3 download website that offers a large collection of songs from different genres and artists.</td>
108
- </tr>
109
- </table>
110
- <p>To use these websites, you need to follow these general steps:</p>
111
- <ol>
112
- <li>Go to the website and search for The Final Countdown song by typing the name or the keywords in the search box.</li>
113
- <li>Choose the version of the song that you want to download from the list of results.</li>
114
- <li>Click on Download or Play and wait for the file to load.</li>
115
- <li>Save the downloaded MP3 file to your device or cloud storage.</li>
116
- </ol>
117
- <p>Note that some websites may require you to complete a captcha, a survey, or an offer before downloading. You should also be careful about clicking on ads or pop-ups that may redirect you to malicious websites or download unwanted programs.</p>
118
- <h2>Conclusion</h2>
119
- <p>The Final Countdown is a classic rock song by Europe that has become a global phenomenon and a cultural icon. It is a song that can inspire, motivate, and entertain you with its catchy melody and epic lyrics. If you want to download this song as an MP3 file for free, you have several options to choose from, such as YouTube, SoundCloud, or other websites. However, you should be aware of the legal and ethical issues of downloading music for free, as well as the potential risks of using untrusted sources. You should respect the rights and efforts of the musicians and the music industry, and support them by paying for their music legally. You should also protect your device and personal information by avoiding suspicious websites and links. There are many legal and affordable ways to enjoy music online, such as streaming services, digital stores, and official websites.</p>
120
- <h2>FAQs</h2>
121
- <h3>Who wrote and performed The Final Countdown?</h3>
122
- <p>The Final Countdown was written by Joey Tempest, the lead singer of Europe, a rock band from Sweden. The band members who performed the song were Joey Tempest (vocals), John Norum (guitar), John Levén (bass), Mic Michaeli (keyboards), and Ian Haugland (drums).</p>
123
- <h3>What genre is The Final Countdown?</h3>
124
- <p>The Final Countdown is a rock song that belongs to the subgenre of glam metal or hair metal. This is a style of rock music that emerged in the late 1970s and early 1980s, characterized by flashy outfits, heavy makeup, big hair, catchy hooks, power ballads, guitar solos, and anthemic choruses.</p>
125
- <h3>How many views does The Final Countdown have on YouTube?</h3>
126
- <p>The official video of The Final Countdown by Europe has over 1 billion views on YouTube as of June 2023. This makes it one of the most viewed videos on YouTube and one of the most viewed music videos of all time.</p>
127
- <h3>Is it legal to download music from YouTube?</h3>
128
- <p>No, it is not legal to download music from YouTube without the permission of the artists or the record labels. YouTube's terms of service state that you are only allowed to stream videos from its website or app, and not to download them for offline use. Downloading music from YouTube violates the intellectual property rights of the creators and owners of the music, who deserve to be compensated for their work and investment. Downloading music from YouTube also harms the music industry, which relies on the revenue from views and ads to support new artists and produce quality music.</p>
129
- <h3>What are some alternatives to downloading music for free?</h3>
130
- <p>If you want to enjoy music online without downloading it for free, you have many alternatives to choose from. Some of the most popular and affordable ways to listen to music online are streaming services, digital stores, and official websites. Here are some examples of these alternatives:</p>
131
- <table>
132
- <tr>
133
- <th>Alternative</th>
134
- <th>Description</th>
135
- <th>Advantages</th>
136
- <th>Disadvantages</th>
137
- </tr>
138
- <tr>
139
- <td>Streaming services</td>
140
- <td>Online platforms that allow you to stream music from a large library of songs, albums, playlists, and radio stations. Some of the most popular streaming services are Spotify, Apple Music, Amazon Music, YouTube Music, Deezer, Tidal, etc.</td>
141
- <td>You can access millions of songs from different genres and artists. You can discover new music and personalized recommendations. You can create your own playlists and share them with others. You can listen to music offline with a premium subscription. You can support the artists and the music industry by paying a monthly fee.</td>
142
- <td>You need an internet connection or a premium subscription to listen to music offline. You may not find some songs or artists that are exclusive to other platforms. You may have to deal with ads or limited features with a free subscription. You may have to pay extra for some features or content.</td>
143
- </tr>
144
- <tr>
145
- <td>Digital stores</td>
146
- <td>Online platforms that allow you to buy and download individual songs or albums as digital files. Some of the most popular digital stores are iTunes, Google Play Music, Amazon Music, Bandcamp, etc.</td>
147
- <td>You can own the music that you buy and download. You can enjoy the high-quality sound and original version of the music. You can transfer the music to any device or cloud storage. You can support the artists and the music industry by paying for their music.</td>
148
- <td>You have to pay for each song or album that you want to download. You may not find some songs or artists that are exclusive to other platforms. You may have to deal with DRM (digital rights management) restrictions that limit your use of the music.</td>
149
- </tr>
150
- <tr>
151
- <td>Official websites</td>
152
- <td>Online platforms that belong to the artists or the record labels that offer their music for streaming or downloading. Some of the most popular official websites are SoundCloud, Bandcamp, YouTube, etc.</td>
153
- <td>You can listen to the music directly from the source. You can find some songs or artists that are not available on other platforms. You can support the artists and the record labels by paying for their music or donating to them.</td>
154
- <td>You may not find a large variety or quantity of music on these platforms. You may have to deal with ads or limited features on these platforms. You may not be able to download the music from these platforms unless they allow it.</td>
155
- </tr>
156
- </table>
spaces/2gauravc/search_summary_chatgpt/main.py DELETED
@@ -1,104 +0,0 @@
1
-
2
- import config
3
- import openai
4
- import sys, getopt
5
- from datetime import datetime
6
- import streamlit as st
7
- import boto3
8
-
9
- def get_chatgpt_resp(question):
10
- openai.api_key = st.secrets['OPENAI_API_KEY']
11
- response = openai.ChatCompletion.create(
12
- model='gpt-3.5-turbo',
13
- messages=[
14
- {"role":"system","content":"You are a chatbot"},
15
- {"role":"system","content":question}]
16
- )
17
- result = ''
18
- for choice in response.choices:
19
- result+=choice.message.content
20
-
21
- return (result)
22
-
23
- def gsearch(query, num_results):
24
- try:
25
- from googlesearch import search
26
- except ImportError:
27
- print("No module named 'google' found")
28
-
29
- # Google Search and return 10 links
30
- search_links = []
31
- for j in search(query, tld="com", num=num_results, stop=num_results, pause=2):
32
- search_links.append(j)
33
- return(search_links)
34
-
35
-
36
- def chatgpt_prompt(pname, search_links):
37
- all_links = '\n'.join(map(str,search_links))
38
- prompt_text = "You are a expert KYC analyst. I need help to identify if there is any adverse news about {}\
39
- in the following links. \n {}. \n. In the reply include a 20 word summary of the text in each link and if you find any adverse\
40
- news (Yes or No)".format(pname, all_links)
41
- return(prompt_text)
42
-
43
- def generate_kyc_output(query, search_links, chat_response, start_time):
44
- rep_txt = ''
45
-
46
- rep_txt += 'Summary of Google Search for {} \n'.format(query)
47
- rep_txt += '\n'
48
- rep_txt += "Report generated on {} \n".format(datetime.now())
49
- #rep_txt += "----------------------------------------------------- \n"
50
- rep_txt += '\n'
51
- rep_txt += "Top Google Search Links "
52
- rep_txt += '\n'
53
- rep_txt += '\n'.join(map(str,search_links))
54
- #rep_txt += "\n----------------------------------------------------- \n"
55
- rep_txt += '\n'
56
- rep_txt+= "\n Summary of searches and adverse news findings \n"
57
- #rep_txt += "----------------------------------------------------- \n"
58
- rep_txt += chat_response
59
- rep_txt += '\n'
60
-
61
- end_time = datetime.now()
62
- exec_time = (end_time - start_time).total_seconds()
63
- rep_txt += "Execution runtime {} seconds \n".format(exec_time)
64
- rep_txt += '\n'
65
-
66
- return(rep_txt)
67
-
68
- def save_to_s3(search_text,date_time):
69
- s3 = boto3.resource(
70
- 's3',
71
- region_name='us-east-1',
72
- aws_access_key_id=st.secrets['AWS_ACCESS_KEY_ID'],
73
- aws_secret_access_key=st.secrets['AWS_ACCESS_KEY']
74
- )
75
- fname = ("{}.txt").format(date_time)
76
- object = s3.Object('adverse-news-search', fname)
77
- object.put(Body=search_text)
78
-
79
- def main(argv):
80
- try:
81
- opts, args = getopt.getopt(argv,"i:", ["person="])
82
- except getopt.GetoptError:
83
- print ('Usage: python main.py --person=<person name>')
84
- sys.exit(2)
85
- for opt, arg in opts:
86
- if opt == '--person':
87
- pname = arg
88
- # Google search for the person name and get the first 20 query links
89
- search_links = gsearch(pname, 20)
90
-
91
- # Construct the prompt
92
- prompt_text = chatgpt_prompt(pname, search_links)
93
-
94
- #get ChatGPT response
95
- resp = get_chatgpt_resp(prompt_text)
96
-
97
- # Create PDF with links and summary
98
- #rep_txt= generate_kyc_output(pname, search_links, resp)
99
-
100
- #print(rep_txt)
101
-
102
-
103
- if __name__ == "__main__":
104
- main(sys.argv[1:])
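For context while reviewing this removal, a hypothetical smoke test for the module above; it assumes `main.py` is importable from the working directory and that its dependencies (openai, streamlit, boto3, and the `googlesearch` package) are installed. The OpenAI call is skipped because it needs `st.secrets`:

```python
# Hypothetical driver for the deleted module above (not part of the original repo).
from main import gsearch, chatgpt_prompt

links = gsearch("Jane Doe", 10)             # top-10 Google hits for the name
prompt = chatgpt_prompt("Jane Doe", links)  # KYC adverse-news prompt
print(prompt)                               # would be passed to get_chatgpt_resp()
```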
spaces/2ndelement/voicevox/README.md DELETED
@@ -1,579 +0,0 @@
1
- ---
2
- license: lgpl-3.0
3
- title: voicevox
4
- sdk: docker
5
- emoji: 🐢
6
- colorFrom: blue
7
- colorTo: pink
8
- pinned: true
9
- ---
10
- # VOICEVOX ENGINE
11
-
12
- [![build](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build.yml/badge.svg)](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build.yml)
13
- [![releases](https://img.shields.io/github/v/release/VOICEVOX/voicevox_engine)](https://github.com/VOICEVOX/voicevox_engine/releases)
14
- [![discord](https://img.shields.io/discord/879570910208733277?color=5865f2&label=&logo=discord&logoColor=ffffff)](https://discord.gg/WMwWetrzuh)
15
-
16
- [![test](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/test.yml/badge.svg)](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/test.yml)
17
- [![Coverage Status](https://coveralls.io/repos/github/VOICEVOX/voicevox_engine/badge.svg)](https://coveralls.io/github/VOICEVOX/voicevox_engine)
18
-
19
- [![build-docker](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build-docker.yml/badge.svg)](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build-docker.yml)
20
- [![docker](https://img.shields.io/docker/pulls/voicevox/voicevox_engine)](https://hub.docker.com/r/voicevox/voicevox_engine)
21
-
22
- [VOICEVOX](https://voicevox.hiroshiba.jp/) のエンジンです。
23
- 実態は HTTP サーバーなので、リクエストを送信すればテキスト音声合成できます。
24
-
25
- (エディターは [VOICEVOX](https://github.com/VOICEVOX/voicevox/) 、
26
- コアは [VOICEVOX CORE](https://github.com/VOICEVOX/voicevox_core/) 、
27
- 全体構成は [こちら](https://github.com/VOICEVOX/voicevox/blob/main/docs/%E5%85%A8%E4%BD%93%E6%A7%8B%E6%88%90.md) に詳細があります。)
28
-
29
- ## ダウンロード
30
-
31
- [こちら](https://github.com/VOICEVOX/voicevox_engine/releases/latest)から対応するエンジンをダウンロードしてください。
32
-
33
- ## API ドキュメント
34
-
35
- [API ドキュメント](https://voicevox.github.io/voicevox_engine/api/)をご参照ください。
36
-
37
- VOICEVOX エンジンもしくはエディタを起動した状態で http://127.0.0.1:50021/docs にアクセスすると、起動中のエンジンのドキュメントも確認できます。
38
- 今後の方針などについては [VOICEVOX 音声合成エンジンとの連携](./docs/VOICEVOX音声合成エンジンとの連携.md) も参考になるかもしれません。
39
-
40
- リクエスト・レスポンスの文字コードはすべて UTF-8 です。
41
-
42
- ### HTTP リクエストで音声合成するサンプルコード
43
-
44
- ```bash
45
- echo -n "こんにちは、音声合成の世界へようこそ" >text.txt
46
-
47
- curl -s \
48
- -X POST \
49
- "127.0.0.1:50021/audio_query?speaker=1"\
50
- --get --data-urlencode [email protected] \
51
- > query.json
52
-
53
- curl -s \
54
- -H "Content-Type: application/json" \
55
- -X POST \
56
- -d @query.json \
57
- "127.0.0.1:50021/synthesis?speaker=1" \
58
- > audio.wav
59
- ```
60
-
61
- 生成される音声はサンプリングレートが 24000Hz と少し特殊なため、音声プレーヤーによっては再生できない場合があります。
62
-
63
- `speaker` に指定する値は `/speakers` エンドポイントで得られる `style_id` です。互換性のために `speaker` という名前になっています。
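For reference, an equivalent (untested) Python sketch of the curl example above, assuming the engine is running locally and the `requests` package is installed:

```python
# Sketch: text -> audio.wav via the documented /audio_query and /synthesis endpoints.
import requests

BASE = "http://127.0.0.1:50021"
text = "こんにちは、音声合成の世界へようこそ"
style_id = 1  # a style_id from the /speakers endpoint

query = requests.post(f"{BASE}/audio_query",
                      params={"text": text, "speaker": style_id}).json()
audio = requests.post(f"{BASE}/synthesis",
                      params={"speaker": style_id}, json=query)
with open("audio.wav", "wb") as f:
    f.write(audio.content)
```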
64
-
65
- ### 読み方を AquesTalk 記法で取得・修正するサンプルコード
66
-
67
- `/audio_query`のレスポンスにはエンジンが判断した読み方が AquesTalk ライクな記法([本家の記法](https://www.a-quest.com/archive/manual/siyo_onseikigou.pdf)とは一部異なります)で記録されています。
68
- 記法は次のルールに従います。
69
-
70
- - 全てのカナはカタカナで記述される
71
- - アクセント句は`/`または`、`で区切る。`、`で区切った場合に限り無音区間が挿入される。
72
- - カナの手前に`_`を入れるとそのカナは無声化される
73
- - アクセント位置を`'`で指定する。全てのアクセント句にはアクセント位置を 1 つ指定する必要がある。
74
- - アクセント句末に`?`(全角)を入れることにより疑問文の発音ができる
75
-
76
- ```bash
77
- # 読ませたい文章をutf-8でtext.txtに書き出す
78
- echo -n "ディープラーニングは万能薬ではありません" >text.txt
79
-
80
- curl -s \
81
- -X POST \
82
- "127.0.0.1:50021/audio_query?speaker=1" \
83
- --get --data-urlencode [email protected] \
84
- > query.json
85
-
86
- cat query.json | grep -o -E "\"kana\":\".*\""
87
- # 結果... "kana":"ディ'イプ/ラ'アニングワ/バンノオヤクデワアリマセ'ン"
88
-
89
- # "ディイプラ'アニングワ/バンノ'オヤクデワ/アリマセ'ン"と読ませたいので、
90
- # is_kana=trueをつけてイントネーションを取得しnewphrases.jsonに保存
91
- echo -n "ディイプラ'アニングワ/バンノ'オヤクデワ/アリマセ'ン" > kana.txt
92
- curl -s \
93
- -X POST \
94
- "127.0.0.1:50021/accent_phrases?speaker=1&is_kana=true" \
95
- --get --data-urlencode [email protected] \
96
- > newphrases.json
97
-
98
- # query.jsonの"accent_phrases"の内容をnewphrases.jsonの内容に置き換える
99
- cat query.json | sed -e "s/\[{.*}\]/$(cat newphrases.json)/g" > newquery.json
100
-
101
- curl -s \
102
- -H "Content-Type: application/json" \
103
- -X POST \
104
- -d @newquery.json \
105
- "127.0.0.1:50021/synthesis?speaker=1" \
106
- > audio.wav
107
- ```
108
-
109
- ### ユーザー辞書機能について
110
-
111
- APIからユーザー辞書の参照、単語の追加、編集、削除を行うことができます。
112
-
113
- #### 参照
114
-
115
- `/user_dict`にGETリクエストを投げることでユーザー辞書の一覧を取得することができます。
116
-
117
- ```bash
118
- curl -s -X GET "127.0.0.1:50021/user_dict"
119
- ```
120
-
121
- #### 単語追加
122
-
123
- `/user_dict_word`にPOSTリクエストを投げる事でユーザー辞書に単語を追加することができます。
124
- URLパラメータとして、以下が必要です。
125
- - surface (辞書に登録する単語)
126
- - pronunciation (カタカナでの読み方)
127
- - accent_type (アクセント核位置、整数)
128
-
129
- アクセント核位置については、こちらの文章が参考になるかと思います。
130
- 〇型となっている数字の部分がアクセント核位置になります。
131
- https://tdmelodic.readthedocs.io/ja/latest/pages/introduction.html
132
-
133
- 成功した場合の返り値は単語に割り当てられるUUIDの文字列になります。
134
-
135
- ```bash
136
- surface="test"
137
- pronunciation="テスト"
138
- accent_type="1"
139
-
140
- curl -s -X POST "127.0.0.1:50021/user_dict_word" \
141
- --get \
142
- --data-urlencode "surface=$surface" \
143
- --data-urlencode "pronunciation=$pronunciation" \
144
- --data-urlencode "accent_type=$accent_type"
145
- ```
146
-
147
- #### 単語修正
148
-
149
- `/user_dict_word/{word_uuid}`にPUTリクエストを投げる事でユーザー辞書の単語を修正することができます。
150
- URLパラメータとして、以下が必要です。
151
- - surface (辞書に登録するワード)
152
- - pronunciation (カタカナでの読み方)
153
- - accent_type (アクセント核位置、整数)
154
-
155
- word_uuidは単語追加時に確認できるほか、ユーザー辞書を参照することでも確認できます。
156
- 成功した場合の返り値は`204 No Content`になります。
157
-
158
- ```bash
159
- surface="test2"
160
- pronunciation="テストツー"
161
- accent_type="2"
162
- # 環境によってword_uuidは適宜書き換えてください
163
- word_uuid="cce59b5f-86ab-42b9-bb75-9fd3407f1e2d"
164
-
165
- curl -s -X PUT "127.0.0.1:50021/user_dict_word/$word_uuid" \
166
- --get \
167
- --data-urlencode "surface=$surface" \
168
- --data-urlencode "pronunciation=$pronunciation" \
169
- --data-urlencode "accent_type=$accent_type"
170
- ```
171
-
172
- #### 単語削除
173
-
174
- `/user_dict_word/{word_uuid}`にDELETEリクエストを投げる事でユーザー辞書の単語を削除することができます。
175
-
176
- word_uuidは単語追加時に確認できるほか、ユーザー辞書を参照することでも確認できます。
177
- 成功した場合の返り値は`204 No Content`になります。
178
-
179
- ```bash
180
- # 環境によってword_uuidは適宜書き換えてください
181
- word_uuid="cce59b5f-86ab-42b9-bb75-9fd3407f1e2d"
182
-
183
- curl -s -X DELETE "127.0.0.1:50021/user_dict_word/$word_uuid"
184
- ```
185
-
186
- ### プリセット機能について
187
-
188
- `presets.yaml`を編集することで話者や話速などのプリセットを使うことができます。
189
-
190
- ```bash
191
- echo -n "プリセットをうまく活用すれば、サードパーティ間で同じ設定を使うことができます" >text.txt
192
-
193
- # プリセット情報を取得
194
- curl -s -X GET "127.0.0.1:50021/presets" > presets.json
195
-
196
- preset_id=$(cat presets.json | sed -r 's/^.+"id"\:\s?([0-9]+?).+$/\1/g')
197
- style_id=$(cat presets.json | sed -r 's/^.+"style_id"\:\s?([0-9]+?).+$/\1/g')
198
-
199
- # AudioQueryの取得
200
- curl -s \
201
- -X POST \
202
- "127.0.0.1:50021/audio_query_from_preset?preset_id=$preset_id"\
203
- --get --data-urlencode [email protected] \
204
- > query.json
205
-
206
- # 音声合成
207
- curl -s \
208
- -H "Content-Type: application/json" \
209
- -X POST \
210
- -d @query.json \
211
- "127.0.0.1:50021/synthesis?speaker=$style_id" \
212
- > audio.wav
213
- ```
214
-
215
- - `speaker_uuid`は、`/speakers`で確認できます
216
- - `id`は重複してはいけません
217
- - エンジン起動後にファイルを書き換えるとエンジンに反映されます
218
-
219
- ### 2 人の話者でモーフィングするサンプルコード
220
-
221
- `/synthesis_morphing`では、2 人の話者でそれぞれ合成された音声を元に、モーフィングした音声を生成します。
222
-
223
- ```bash
224
- echo -n "モーフィングを利用することで、2つの声を混ぜることができます。" > text.txt
225
-
226
- curl -s \
227
- -X POST \
228
- "127.0.0.1:50021/audio_query?speaker=0"\
229
- --get --data-urlencode [email protected] \
230
- > query.json
231
-
232
- # 元の話者での合成結果
233
- curl -s \
234
- -H "Content-Type: application/json" \
235
- -X POST \
236
- -d @query.json \
237
- "127.0.0.1:50021/synthesis?speaker=0" \
238
- > audio.wav
239
-
240
- export MORPH_RATE=0.5
241
-
242
- # 話者2人分の音声合成+WORLDによる音声分析が入るため時間が掛かるので注意
243
- curl -s \
244
- -H "Content-Type: application/json" \
245
- -X POST \
246
- -d @query.json \
247
- "127.0.0.1:50021/synthesis_morphing?base_speaker=0&target_speaker=1&morph_rate=$MORPH_RATE" \
248
- > audio.wav
249
-
250
- export MORPH_RATE=0.9
251
-
252
- # query、base_speaker、target_speakerが同じ場合はキャッシュが使用されるため比較的高速に生成される
253
- curl -s \
254
- -H "Content-Type: application/json" \
255
- -X POST \
256
- -d @query.json \
257
- "127.0.0.1:50021/synthesis_morphing?base_speaker=0&target_speaker=1&morph_rate=$MORPH_RATE" \
258
- > audio.wav
259
- ```
260
-
261
- ### 話者の追加情報を取得するサンプルコード
262
-
263
- 追加情報の中の portrait.png を取得するコードです。
264
- ([jq](https://stedolan.github.io/jq/)を使用して json をパースしています。)
265
-
266
- ```bash
267
- curl -s -X GET "127.0.0.1:50021/speaker_info?speaker_uuid=7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff" \
268
- | jq -r ".portrait" \
269
- | base64 -d \
270
- > portrait.png
271
- ```
272
-
273
- ### キャンセル可能な音声合成
274
-
275
- `/cancellable_synthesis`では通信を切断した場合に即座に計算リソースが開放されます。
276
- (`/synthesis`では通信を切断しても最後まで音声合成の計算が行われます)
277
- この API は実験的機能であり、エンジン起動時に引数で`--enable_cancellable_synthesis`を指定しないと有効化されません。
278
- 音声合成に必要なパラメータは`/synthesis`と同様です。
279
-
280
- ### CORS設定
281
-
282
- VOICEVOXではセキュリティ保護のため`localhost`・`127.0.0.1`・`app://`・Originなし以外のOriginからリクエストを受け入れないようになっています。
283
- そのため、一部のサードパーティアプリからのレスポンスを受け取れない可能性があります。
284
- これを回避する方法として、エンジンから設定できるUIを用意しています。
285
-
286
- #### 設定方法
287
-
288
- 1. <http://127.0.0.1:50021/setting> にアクセスします。
289
- 2. 利用するアプリに合わせて設定を変更、追加してください。
290
- 3. 保存ボタンを押して、変更を確定してください。
291
- 4. 設定の適用にはエンジンの再起動が必要です。必要に応じて再起動をしてください。
292
-
293
- ## アップデート
294
-
295
- エンジンディレクトリ内にあるファイルを全て消去し、新しいものに置き換えてください。
296
-
297
- ## Docker イメージ
298
-
299
- ### CPU
300
-
301
- ```bash
302
- docker pull voicevox/voicevox_engine:cpu-ubuntu20.04-latest
303
- docker run --rm -p '127.0.0.1:50021:50021' voicevox/voicevox_engine:cpu-ubuntu20.04-latest
304
- ```
305
-
306
- ### GPU
307
-
308
- ```bash
309
- docker pull voicevox/voicevox_engine:nvidia-ubuntu20.04-latest
310
- docker run --rm --gpus all -p '127.0.0.1:50021:50021' voicevox/voicevox_engine:nvidia-ubuntu20.04-latest
311
- ```
312
-
313
- #### トラブルシューティング
314
- GPU版を利用する場合、環境によってエラーが発生することがあります。その場合、`--runtime=nvidia`を`docker run`につけて実行すると解決できることがあります。
315
-
316
- ## 貢献者の方へ
317
-
318
- Issue を解決するプルリクエストを作成される際は、別の方と同じ Issue に取り組むことを避けるため、
319
- Issue 側で取り組み始めたことを伝えるか、最初に Draft プルリクエストを作成してください。
320
-
321
- [VOICEVOX 非公式 Discord サーバー](https://discord.gg/WMwWetrzuh)にて、開発の議論や雑談を行っています。気軽にご参加ください。
322
-
323
- ## 環境構築
324
-
325
- `Python 3.11.3` を用いて開発されています。
326
- インストールするには、各 OS ごとの C/C++ コンパイラ、CMake が必要になります。
327
-
328
- ```bash
329
- # 開発に必要なライブラリのインストール
330
- python -m pip install -r requirements-dev.txt -r requirements-test.txt
331
-
332
- # とりあえず実行したいだけなら代わりにこちら
333
- python -m pip install -r requirements.txt
334
- ```
335
-
336
- ## 実行
337
-
338
- コマンドライン引数の詳細は以下のコマンドで確認してください。
339
-
340
- ```bash
341
- python run.py --help
342
- ```
343
-
344
- ```bash
345
- # 製品版 VOICEVOX でサーバーを起動
346
- VOICEVOX_DIR="C:/path/to/voicevox" # 製品版 VOICEVOX ディレクトリのパス
347
- python run.py --voicevox_dir=$VOICEVOX_DIR
348
- ```
349
-
350
- <!-- 差し替え可能な音声ライブラリまたはその仕様が公開されたらコメントを外す
351
- ```bash
352
- # 音声ライブラリを差し替える
353
- VOICELIB_DIR="C:/path/to/your/tts-model"
354
- python run.py --voicevox_dir=$VOICEVOX_DIR --voicelib_dir=$VOICELIB_DIR
355
- ```
356
- -->
357
-
358
- ```bash
359
- # モックでサーバー起動
360
- python run.py --enable_mock
361
- ```
362
-
363
- ```bash
364
- # ログをUTF8に変更
365
- python run.py --output_log_utf8
366
- # もしくは VV_OUTPUT_LOG_UTF8=1 python run.py
367
- ```
368
-
369
- ### CPU スレッド数を指定する
370
-
371
- CPU スレッド数が未指定の場合は、論理コア数の半分か物理コア数が使われます。(殆どの CPU で、これは全体の処理能力の半分です)
372
- もし IaaS 上で実行していたり、専用サーバーで実行している場合などで、
373
- エンジンが使う処理能力を調節したい場合は、CPU スレッド数を指定することで実現できます。
374
-
375
- - 実行時引数で指定する
376
-
377
- ```bash
378
- python run.py --voicevox_dir=$VOICEVOX_DIR --cpu_num_threads=4
379
- ```
380
-
381
- - 環境変数で指定する
382
- ```bash
383
- export VV_CPU_NUM_THREADS=4
384
- python run.py --voicevox_dir=$VOICEVOX_DIR
385
- ```
386
-
387
- ### 過去のバージョンのコアを使う
388
- VOICEVOX Core 0.5.4以降のコアを使用する事が可能です。
389
- Macでのlibtorch版コアのサポートはしていません。
390
-
391
- #### 過去のバイナリを指定する
392
- 製品版VOICEVOXもしくはコンパイル済みエンジンのディレクトリを`--voicevox_dir`引数で指定すると、そのバージョンのコアが使用されます。
393
- ```bash
394
- python run.py --voicevox_dir="/path/to/voicevox"
395
- ```
396
- Macでは、`DYLD_LIBRARY_PATH`の指定が必要です。
397
- ```bash
398
- DYLD_LIBRARY_PATH="/path/to/voicevox" python run.py --voicevox_dir="/path/to/voicevox"
399
- ```
400
-
401
- #### 音声ライブラリを直接指定する
402
- [VOICEVOX Coreのzipファイル](https://github.com/VOICEVOX/voicevox_core/releases)を解凍したディレクトリを`--voicelib_dir`引数で指定します。
403
- また、コアのバージョンに合わせて、[libtorch](https://pytorch.org/)や[onnxruntime](https://github.com/microsoft/onnxruntime)のディレクトリを`--runtime_dir`引数で指定します。
404
- ただし、システムの探索パス上にlibtorch、onnxruntimeがある場合、`--runtime_dir`引数の指定は不要です。
405
- `--voicelib_dir`引数、`--runtime_dir`引数は複数回使用可能です。
406
- APIエンドポイントでコアのバージョンを指定する場合は`core_version`引数を指定してください。(未指定の場合は最新のコアが使用されます)
407
- ```bash
408
- python run.py --voicelib_dir="/path/to/voicevox_core" --runtime_dir="/path/to/libtorch_or_onnx"
409
- ```
410
- Macでは、`--runtime_dir`引数の代わりに`DYLD_LIBRARY_PATH`の指定が必要です。
411
- ```bash
412
- DYLD_LIBRARY_PATH="/path/to/onnx" python run.py --voicelib_dir="/path/to/voicevox_core"
413
- ```
414
-
415
- ## コードフォーマット
416
-
417
- このソフトウェアでは、リモートにプッシュする前にコードフォーマットを確認する仕組み(静的解析ツール)を利用できます。
418
- 利用するには、開発に必要なライブラリのインストールに加えて、以下のコマンドを実行してください。
419
- プルリクエストを作成する際は、利用することを推奨します。
420
-
421
- ```bash
422
- pre-commit install -t pre-push
423
- ```
424
-
425
- エラーが出た際は、以下のコマンドで修正することが可能です。なお、完全に修正できるわけではないので注意してください。
426
-
427
- ```bash
428
- pysen run format lint
429
- ```
430
-
431
- ## タイポチェック
432
-
433
- [typos](https://github.com/crate-ci/typos) を使ってタイポのチェックを行っています。
434
- [typos をインストール](https://github.com/crate-ci/typos#install) した後
435
-
436
- ```bash
437
- typos
438
- ```
439
-
440
- でタイポチェックを行えます。
441
- もし誤判定やチェックから除外すべきファイルがあれば
442
- [設定ファイルの説明](https://github.com/crate-ci/typos#false-positives) に従って`_typos.toml`を編集してください。
443
-
444
- ## API ドキュメントの確認
445
-
446
- [API ドキュメント](https://voicevox.github.io/voicevox_engine/api/)(実体は`docs/api/index.html`)は自動で更新されます。
447
- 次のコマンドで API ドキュメントを手動で作成することができます。
448
-
449
- ```bash
450
- python make_docs.py
451
- ```
452
-
453
- ## ビルド
454
-
455
- この方法でビルドしたものは、リリースで公開されているものとは異なります。
456
- また、GPUで利用するにはcuDNNやCUDA、DirectMLなどのライブラリが追加で必要となります。
457
-
458
- ```bash
459
- python -m pip install -r requirements-dev.txt
460
-
461
- OUTPUT_LICENSE_JSON_PATH=licenses.json \
462
- bash build_util/create_venv_and_generate_licenses.bash
463
-
464
- # ビルド自体はLIBCORE_PATH及びLIBONNXRUNTIME_PATHの指定がなくても可能です
465
- LIBCORE_PATH="/path/to/libcore" \
466
- LIBONNXRUNTIME_PATH="/path/to/libonnxruntime" \
467
- pyinstaller --noconfirm run.spec
468
- ```
469
-
470
- ## 依存関係
471
-
472
- ### 更新
473
-
474
- [Poetry](https://python-poetry.org/) を用いて依存ライブラリのバージョンを固定しています。
475
- 以下のコマンドで操作できます:
476
-
477
- ```bash
478
- # パッケージを追加する場合
479
- poetry add `パッケージ名`
480
- poetry add --group dev `パッケージ名` # 開発依存の追加
481
- poetry add --group test `パッケージ名` # テスト依存の追加
482
-
483
- # パッケージをアップデートする場合
484
- poetry update `パッケージ名`
485
- poetry update # 全部更新
486
-
487
- # requirements.txtの更新
488
- poetry export --without-hashes -o requirements.txt # こちらを更新する場合は下3つも更新する必要があります。
489
- poetry export --without-hashes --with dev -o requirements-dev.txt
490
- poetry export --without-hashes --with test -o requirements-test.txt
491
- poetry export --without-hashes --with license -o requirements-license.txt
492
- ```
493
-
494
- ### ライセンス
495
-
496
- 依存ライブラリは「コアビルド時にリンクして一体化しても、コア部のコード非公開 OK」なライセンスを持つ必要があります。
497
- 主要ライセンスの可否は以下の通りです。
498
-
499
- - MIT/Apache/BSD-3: OK
500
- - LGPL: OK (コアと動的分離されているため)
501
- - GPL: NG (全関連コードの公開が必要なため)
502
-
503
- ## ユーザー辞書の更新について
504
-
505
- 以下のコマンドで openjtalk のユーザー辞書をコンパイルできます。
506
-
507
- ```bash
508
- python -c "import pyopenjtalk; pyopenjtalk.create_user_dict('default.csv','user.dic')"
509
- ```
510
-
511
- ## マルチエンジン機能に関して
512
-
513
- VOICEVOX エディターでは、複数のエンジンを同時に起動することができます。
514
- この機能を利用することで、自作の音声合成エンジンや既存の音声合成エンジンを VOICEVOX エディター上で動かすことが可能です。
515
-
516
- <img src="./docs/res/マルチエンジン概念図.svg" width="320">
517
-
518
- <details>
519
-
520
- ### マルチエンジン機能の仕組み
521
-
522
- VOICEVOX API に準拠した複数のエンジンの Web API をポートを分けて起動し、統一的に扱うことでマルチエンジン機能を実現しています。
523
- エディターがそれぞれのエンジンを実行バイナリ経由で起動し、EngineID と結びつけて設定や状態を個別管理します。
524
-
525
- ### マルチエンジン機能への対応方法
526
-
527
- VOICEVOX API 準拠エンジンを起動する実行バイナリを作ることで対応が可能です。
528
- VOICEVOX ENGINE リポジトリを fork し、一部の機能を改造するのが簡単です。
529
-
530
- 改造すべき点はエンジン情報・キャラクター情報・音声合成の3点です。
531
-
532
- エンジンの情報はエンジンマニフェスト(`engine_manifest.json`)で管理されています。
533
- マニフェストファイル内の情報を見て適宜変更してください。
534
- 音声合成手法によっては、例えばモーフィング機能など、VOICEVOX と同じ機能を持つことができない場合があります。
535
- その場合はマニフェストファイル内の`supported_features`内の情報を適宜変更してください。
536
-
537
- キャラクター情報は`speaker_info`ディレクトリ内のファイルで管理されています。
538
- ダミーのアイコンなどが用意されているので適宜変更してください。
539
-
540
- 音声合成は`voicevox_engine/synthesis_engine/synthesis_engine.py`で行われています。
541
- VOICEVOX API での音声合成は、エンジン側で音声合成クエリ`AudioQuery`の初期値を作成してユーザーに返し、ユーザーが必要に応じてクエリを編集したあと、エンジンがクエリに従って音声合成することで実現しています。
542
- クエリ作成は`/audio_query`エンドポイントで、音声合成は`/synthesis`エンドポイントで行っており、最低この2つに対応すれば VOICEVOX API に準拠したことになります。
543
-
544
- ### マルチエンジン機能対応エンジンの配布方法
545
-
546
- VVPP ファイルとして配布するのがおすすめです。
547
- VVPP は「VOICEVOX プラグインパッケージ」の略で、中身はビルドしたエンジンなどを含んだディレクトリの Zip ファイルです。
548
- 拡張子を`.vvpp`にすると、ダブルクリックで VOICEVOX エディターにインストールできます。
549
-
550
- エディター側は受け取った VVPP ファイルをローカルディスク上に Zip 展開したあと、ルートの直下にある`engine_manifest.json`に従ってファイルを探査します。
551
- VOICEVOX エディターにうまく読み込ませられないときは、エディターのエラーログを参照してください。
552
-
553
- また、`xxx.vvpp`は分割して連番を付けた`xxx.0.vvppp`ファイルとして配布することも可能です。
554
- これはファイル容量が大きくて配布が困難な場合に有用です。
555
-
556
- </details>
557
-
558
- ## GitHub Actions
559
-
560
- ### Variables
561
-
562
- | name | description |
563
- | :----------------- | :---------------------------------------------------------------------- |
564
- | DOCKERHUB_USERNAME | Docker Hub ユーザ名 |
565
-
566
- ### Secrets
567
-
568
- | name | description |
569
- | :----------------- | :---------------------------------------------------------------------- |
570
- | DOCKERHUB_TOKEN | [Docker Hub アクセストークン](https://hub.docker.com/settings/security) |
571
-
572
- ## 事例紹介
573
-
574
- **[voicevox-client](https://github.com/tuna2134/voicevox-client) [@tuna2134](https://github.com/tuna2134)** ・・・ VOICEVOX ENGINE のためのPythonラッパー
575
-
576
- ## ライセンス
577
-
578
- LGPL v3 と、ソースコードの公開が不要な別ライセンスのデュアルライセンスです。
579
- 別ライセンスを取得したい場合は、ヒホ(twitter: @hiho_karuta)に求めてください。
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/nets_123812KB.py DELETED
@@ -1,122 +0,0 @@
1
- import torch
2
- from torch import nn
3
- import torch.nn.functional as F
4
-
5
- from . import layers_123821KB as layers
6
-
7
-
8
- class BaseASPPNet(nn.Module):
9
- def __init__(self, nin, ch, dilations=(4, 8, 16)):
10
- super(BaseASPPNet, self).__init__()
11
- self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
12
- self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
13
- self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
14
- self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
15
-
16
- self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
17
-
18
- self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
19
- self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
20
- self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
21
- self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
22
-
23
- def __call__(self, x):
24
- h, e1 = self.enc1(x)
25
- h, e2 = self.enc2(h)
26
- h, e3 = self.enc3(h)
27
- h, e4 = self.enc4(h)
28
-
29
- h = self.aspp(h)
30
-
31
- h = self.dec4(h, e4)
32
- h = self.dec3(h, e3)
33
- h = self.dec2(h, e2)
34
- h = self.dec1(h, e1)
35
-
36
- return h
37
-
38
-
39
- class CascadedASPPNet(nn.Module):
40
- def __init__(self, n_fft):
41
- super(CascadedASPPNet, self).__init__()
42
- self.stg1_low_band_net = BaseASPPNet(2, 32)
43
- self.stg1_high_band_net = BaseASPPNet(2, 32)
44
-
45
- self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
46
- self.stg2_full_band_net = BaseASPPNet(16, 32)
47
-
48
- self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
49
- self.stg3_full_band_net = BaseASPPNet(32, 64)
50
-
51
- self.out = nn.Conv2d(64, 2, 1, bias=False)
52
- self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
53
- self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
54
-
55
- self.max_bin = n_fft // 2
56
- self.output_bin = n_fft // 2 + 1
57
-
58
- self.offset = 128
59
-
60
- def forward(self, x, aggressiveness=None):
61
- mix = x.detach()
62
- x = x.clone()
63
-
64
- x = x[:, :, : self.max_bin]
65
-
66
- bandw = x.size()[2] // 2
67
- aux1 = torch.cat(
68
- [
69
- self.stg1_low_band_net(x[:, :, :bandw]),
70
- self.stg1_high_band_net(x[:, :, bandw:]),
71
- ],
72
- dim=2,
73
- )
74
-
75
- h = torch.cat([x, aux1], dim=1)
76
- aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
77
-
78
- h = torch.cat([x, aux1, aux2], dim=1)
79
- h = self.stg3_full_band_net(self.stg3_bridge(h))
80
-
81
- mask = torch.sigmoid(self.out(h))
82
- mask = F.pad(
83
- input=mask,
84
- pad=(0, 0, 0, self.output_bin - mask.size()[2]),
85
- mode="replicate",
86
- )
87
-
88
- if self.training:
89
- aux1 = torch.sigmoid(self.aux1_out(aux1))
90
- aux1 = F.pad(
91
- input=aux1,
92
- pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
93
- mode="replicate",
94
- )
95
- aux2 = torch.sigmoid(self.aux2_out(aux2))
96
- aux2 = F.pad(
97
- input=aux2,
98
- pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
99
- mode="replicate",
100
- )
101
- return mask * mix, aux1 * mix, aux2 * mix
102
- else:
103
- if aggressiveness:
104
- mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
105
- mask[:, :, : aggressiveness["split_bin"]],
106
- 1 + aggressiveness["value"] / 3,
107
- )
108
- mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
109
- mask[:, :, aggressiveness["split_bin"] :],
110
- 1 + aggressiveness["value"],
111
- )
112
-
113
- return mask * mix
114
-
115
- def predict(self, x_mag, aggressiveness=None):
116
- h = self.forward(x_mag, aggressiveness)
117
-
118
- if self.offset > 0:
119
- h = h[:, :, :, self.offset : -self.offset]
120
- assert h.size()[3] > 0
121
-
122
- return h
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/pitch/utils.py DELETED
@@ -1,82 +0,0 @@
1
- import numpy as np
2
- import torch
3
-
4
-
5
- def to_lf0(f0):
6
- f0[f0 < 1.0e-5] = 1.0e-6
7
- lf0 = f0.log() if isinstance(f0, torch.Tensor) else np.log(f0)
8
- lf0[f0 < 1.0e-5] = - 1.0E+10
9
- return lf0
10
-
11
-
12
- def to_f0(lf0):
13
- f0 = np.where(lf0 <= 0, 0.0, np.exp(lf0))
14
- return f0.flatten()
15
-
16
-
17
- def f0_to_coarse(f0, f0_bin=256, f0_max=900.0, f0_min=50.0):
18
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
19
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
20
- is_torch = isinstance(f0, torch.Tensor)
21
- f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
22
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
23
-
24
- f0_mel[f0_mel <= 1] = 1
25
- f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
26
- f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int)
27
- assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min(), f0.min(), f0.max())
28
- return f0_coarse
29
-
30
-
31
- def coarse_to_f0(f0_coarse, f0_bin=256, f0_max=900.0, f0_min=50.0):
32
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
33
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
34
- uv = f0_coarse == 1
35
- f0 = f0_mel_min + (f0_coarse - 1) * (f0_mel_max - f0_mel_min) / (f0_bin - 2)
36
- f0 = ((f0 / 1127).exp() - 1) * 700
37
- f0[uv] = 0
38
- return f0
39
-
40
-
41
- def norm_f0(f0, uv, pitch_norm='log', f0_mean=400, f0_std=100):
42
- is_torch = isinstance(f0, torch.Tensor)
43
- if pitch_norm == 'standard':
44
- f0 = (f0 - f0_mean) / f0_std
45
- if pitch_norm == 'log':
46
- f0 = torch.log2(f0 + 1e-8) if is_torch else np.log2(f0 + 1e-8)
47
- if uv is not None:
48
- f0[uv > 0] = 0
49
- return f0
50
-
51
-
52
- def norm_interp_f0(f0, pitch_norm='log', f0_mean=None, f0_std=None):
53
- is_torch = isinstance(f0, torch.Tensor)
54
- if is_torch:
55
- device = f0.device
56
- f0 = f0.data.cpu().numpy()
57
- uv = f0 == 0
58
- f0 = norm_f0(f0, uv, pitch_norm, f0_mean, f0_std)
59
- if sum(uv) == len(f0):
60
- f0[uv] = 0
61
- elif sum(uv) > 0:
62
- f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv])
63
- if is_torch:
64
- uv = torch.FloatTensor(uv)
65
- f0 = torch.FloatTensor(f0)
66
- f0 = f0.to(device)
67
- uv = uv.to(device)
68
- return f0, uv
69
-
70
-
71
- def denorm_f0(f0, uv, pitch_norm='log', f0_mean=400, f0_std=100, pitch_padding=None, min=50, max=900):
72
- is_torch = isinstance(f0, torch.Tensor)
73
- if pitch_norm == 'standard':
74
- f0 = f0 * f0_std + f0_mean
75
- if pitch_norm == 'log':
76
- f0 = 2 ** f0
77
- f0 = f0.clamp(min=min, max=max) if is_torch else np.clip(f0, a_min=min, a_max=max)
78
- if uv is not None:
79
- f0[uv > 0] = 0
80
- if pitch_padding is not None:
81
- f0[pitch_padding] = 0
82
- return f0
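A hedged usage sketch of the quantization helpers above (an assumption: the repository root is on `PYTHONPATH`, and the import path mirrors this file's location):

```python
# Round-trip: Hz -> 256-bin mel-scale index -> Hz; bin 1 encodes unvoiced frames.
import torch
from text_to_speech.utils.audio.pitch.utils import f0_to_coarse, coarse_to_f0

f0 = torch.tensor([0.0, 110.0, 220.0, 440.0])  # 0.0 marks unvoiced frames
coarse = f0_to_coarse(f0)                      # long tensor with values in [1, 255]
approx = coarse_to_f0(coarse)                  # unvoiced frames map back to 0.0
print(coarse.tolist(), approx.tolist())
```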
spaces/AIWaves/Software_Company/src/agents/Prompt/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .base_Prompts import *
 
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/.ipynb_checkpoints/yolov5_s-v61_syncbn_fast_1xb32-100e_cat-checkpoint.py DELETED
@@ -1,135 +0,0 @@
1
- _base_ = '../yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'
2
-
3
- max_epochs = 100 # maximum number of training epochs
4
- data_root = './data-df2/' # absolute path to the dataset directory
5
- # data_root = '/root/workspace/mmyolo/data/cat/' # absolute path to the dataset directory inside the Docker container
6
-
7
- # Path where results are saved; may be omitted, in which case outputs go to a folder under work_dirs named after this config
8
- # If a config only changes a few parameters, editing this variable lets you save the new training files somewhere else
9
- work_dir = './work_dirs/yolov5_s_df2'
10
-
11
- # load_from accepts a local path or a URL; with a URL the file is downloaded automatically. Since it was already downloaded above, a local path is used here
12
- # Because this tutorial fine-tunes on the cat dataset, `load_from` is needed to load the MMYOLO pre-trained model, which speeds up convergence while preserving accuracy
13
- # load_from = './work_dirs/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth' # noqa
14
-
15
- # Adjust the batch size to your GPU setup; YOLOv5-s defaults to 8 GPUs x batch size 16
16
- train_batch_size_per_gpu = 32
17
- train_num_workers = 4 # recommended: train_num_workers = nGPU x 4
18
-
19
- save_epoch_intervals = 2 # save the weights once every `interval` epochs
20
-
21
- # Adjust base_lr to your GPU setup; scale it as base_lr_default * (your_bs / default_bs)
22
- base_lr = _base_.base_lr / 4
23
-
24
- anchors = [ # anchors already updated for this dataset; anchor generation is covered in a later section
25
- [(68, 69), (154, 91), (143, 162)], # P3/8
26
- [(242, 160), (189, 287), (391, 207)], # P4/16
27
- [(353, 337), (539, 341), (443, 432)] # P5/32
28
- ]
29
-
30
- class_name = ('short_sleeved_shirt',
31
- 'long_sleeved_shirt',
32
- 'short_sleeved_outwear',
33
- 'long_sleeved_outwear',
34
- 'vest',
35
- 'sling',
36
- 'shorts',
37
- 'trousers',
38
- 'skirt',
39
- 'short_sleeved_dress',
40
- 'long_sleeved_dress',
41
- 'vest_dress',
42
- 'sling_dress') # set class_name according to the category info in class_with_id.txt
43
-
44
- num_classes = len(class_name)
45
- metainfo = dict(
46
- classes=class_name,
47
- palette=[(255, 0, 0),
48
- (255, 128, 0),
49
- (255, 255, 0),
50
- (128, 255, 0),
51
- (0, 255, 0),
52
- (0, 255, 128),
53
- (0, 255, 255),
54
- (0, 128, 255),
55
- (0, 0, 255),
56
- (127, 0, 255),
57
- (255, 0, 255),
58
- (255, 0, 127),
59
- (128, 128, 128)] # colors used when plotting; any values will do
60
- )
61
-
62
- train_cfg = dict(
63
- max_epochs=max_epochs,
64
- val_begin=20, # epoch after which validation starts; set to 20 because accuracy is low for the first 20 epochs and evaluating them adds little, so they are skipped
65
- val_interval=save_epoch_intervals # run a test evaluation every val_interval epochs
66
- # dynamic_intervals=[(max_epochs-_base_.num_last_epochs, 1)]
67
- )
68
-
69
- model = dict(
70
- bbox_head=dict(
71
- head_module=dict(num_classes=num_classes),
72
- prior_generator=dict(base_sizes=anchors),
73
-
74
- # loss_cls is scaled dynamically with num_classes, but when num_classes = 1, loss_cls is always 0
75
- loss_cls=dict(loss_weight=0.5 *
76
- (num_classes / 80 * 3 / _base_.num_det_layers))))
77
-
78
- train_dataloader = dict(
79
- batch_size=train_batch_size_per_gpu,
80
- num_workers=train_num_workers,
81
- dataset=dict(
82
- _delete_=True,
83
- type='RepeatDataset',
84
- # With little data, RepeatDataset can repeat the dataset n times within each epoch; here times=2 repeats it twice
85
- times=2,
86
- dataset=dict(
87
- type=_base_.dataset_type,
88
- data_root=data_root,
89
- metainfo=metainfo,
90
- ann_file='annotations/trainval.json',
91
- data_prefix=dict(img='smaller-dataset/'),
92
- filter_cfg=dict(filter_empty_gt=False, min_size=32),
93
- pipeline=_base_.train_pipeline)))
94
-
95
- val_dataloader = dict(
96
- dataset=dict(
97
- metainfo=metainfo,
98
- data_root=data_root,
99
- ann_file='annotations/trainval.json',
100
- data_prefix=dict(img='smaller-dataset/')))
101
-
102
- test_dataloader = val_dataloader
103
-
104
- val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json')
105
- test_evaluator = val_evaluator
106
-
107
- optim_wrapper = dict(optimizer=dict(lr=base_lr))
108
-
109
- default_hooks = dict(
110
- # How often (in epochs) to save the model and how many checkpoints to keep at most; `save_best` additionally saves the best model (recommended)
111
- checkpoint=dict(
112
- type='CheckpointHook',
113
- interval=save_epoch_intervals,
114
- max_keep_ckpts=5,
115
- save_best='auto'),
116
- param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10),
117
- # logger output interval
118
- logger=dict(type='LoggerHook', interval=10))
119
-
120
- # custom_hooks = [
121
- # dict(
122
- # type="EMAHook",
123
- # ema_type="ExpMomentumEMA",
124
- # momentum=0.0001,
125
- # update_buffers=True,
126
- # strict_load=False,
127
- # priority=49),
128
- # dict(
129
- # type="mmdet.PipelineSwitchHook",
130
- # switch_epoch=max_epochs - _base_.num_last_epochs,
131
- # switch_pipeline=_base_.train_pipeline_stage2
132
- # )
133
- # ]
134
-
135
- visualizer = dict(vis_backends=[dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov6/yolov6_n_syncbn_fast_8xb32-300e_coco.py DELETED
@@ -1,21 +0,0 @@
1
- _base_ = './yolov6_s_syncbn_fast_8xb32-300e_coco.py'
2
-
3
- # ======================= Possible modified parameters =======================
4
- # -----model related-----
5
- # The scaling factor that controls the depth of the network structure
6
- deepen_factor = 0.33
7
- # The scaling factor that controls the width of the network structure
8
- widen_factor = 0.25
9
-
10
- # -----train val related-----
11
- lr_factor = 0.02 # Learning rate scaling factor
12
-
13
- # ============================== Unmodified in most cases ===================
14
- model = dict(
15
- backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
16
- neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
17
- bbox_head=dict(
18
- head_module=dict(widen_factor=widen_factor),
19
- loss_bbox=dict(iou_mode='siou')))
20
-
21
- default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor))
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/Chart.js DELETED
@@ -1,65 +0,0 @@
1
- import Canvas from '../canvas/Canvas.js';
2
- import SetChart from './SetChart.js';
3
- import GetChartDataset from './GetChartDataset.js';
4
- import GetChartData from './GetChartData.js';
5
- import SetChartData from './SetChartData.js';
6
- import UpdateChart from './UpdateChart.js';
7
-
8
- // This plugin does not contain chart.js
9
- // Load chart.js in preload stage -
10
- // scene.load.script('chartjs', 'https://cdnjs.cloudflare.com/ajax/libs/Chart.js/3.8.0/Chart.min.js');
11
-
12
- class Chart extends Canvas {
13
- constructor(scene, x, y, width, height, config) {
14
- super(scene, x, y, width, height);
15
- this.type = 'rexChart';
16
- this.chart = undefined;
17
-
18
- if (config !== undefined) {
19
- this.setChart(config);
20
- }
21
- }
22
-
23
- destroy(fromScene) {
24
- // This Game Object has already been destroyed
25
- if (!this.scene) {
26
- return;
27
- }
28
- if (this.chart) {
29
- this.chart.destroy();
30
- this.chart = undefined;
31
- }
32
- super.destroy(fromScene);
33
- }
34
-
35
- resize(width, height) {
36
- if ((width === this.width) && (height === this.height)) {
37
- return this;
38
- }
39
-
40
- super.resize(width, height);
41
-
42
- if (this.chart) {
43
- var chart = this.chart;
44
- chart.height = this.canvas.height;
45
- chart.width = this.canvas.width;
46
- chart.aspectRatio = (chart.height) ? chart.width / chart.height : null;
47
- chart.update();
48
- }
49
- return this;
50
- }
51
- }
52
-
53
- var methods = {
54
- setChart: SetChart,
55
- getChartDataset: GetChartDataset,
56
- getChartData: GetChartData,
57
- setChartData: SetChartData,
58
- updateChart: UpdateChart,
59
- }
60
- Object.assign(
61
- Chart.prototype,
62
- methods
63
- );
64
-
65
- export default Chart;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/Builders.d.ts DELETED
@@ -1,82 +0,0 @@
1
- import BBCodeText from '../../bbcodetext/BBCodeText';
2
- import RoundRectangle from '../../roundrectangle/RoundRectangle';
3
- import NinePatch from '../../ninepatch/NinePatch';
4
- import NinePatch2 from '../../ninepatch2/NinePatch';
5
- import Canvas from '../../canvas/Canvas';
6
- import CircleMaskImage from '../../circlemaskimage/CircleMaskImage';
7
- import Space from '../../space/Space';
8
-
9
- import Sizer from '../../sizer/Sizer';
10
- import FixWidthSizer from '../../fixwidthsizer/FixWidthSizer';
11
- import GridSizer from '../../gridsizer/GridSizer';
12
- import OverlapSizer from '../../overlapsizer/OverlapSizer';
13
-
14
- import Buttons from '../../buttons/Buttons';
15
- import FixWidthButtons from '../../fixwidthbuttons/FixWidthButtons';
16
- import GridButtons from '../../gridbuttons/GridButtons';
17
-
18
- import Label from '../../label/Label';
19
- import BadgeLabel from '../../badgelabel/BadgeLabel';
20
- import Dialog from '../../dialog/Dialog';
21
- import TextBox from '../../textbox/TextBox';
22
- import Slider from '../../slider/Slider';
23
- import NumberBar from '../../numberbar/NumberBar';
24
- import ScrollBar from '../../scrollbar/ScrollBar';
25
- import TextArea from '../../textarea/TextArea';
26
- import Pages from '../../pages/Pages';
27
- import Toast from '../../toast/Toast';
28
- import Knob from '../../knob/Knob';
29
- import HolyGrail from '../../holygrail/HolyGrail';
30
- import Menu from '../../menu/Menu';
31
-
32
- export default Builders;
33
-
34
- declare namespace Builders {
35
- type BuilderTypeCommon<T> = (
36
- scene: Phaser.Scene,
37
- data: Object,
38
- view: Object,
39
- styles: Object,
40
- customBuilders: { [name: string]: BuilderType }
41
- ) => T;
42
-
43
- type BuilderType = BuilderTypeCommon<Phaser.GameObjects.GameObject>;
44
- }
45
-
46
- declare var Builders: {
47
- Image: Builders.BuilderTypeCommon<Phaser.GameObjects.Image>,
48
- Sprite: Builders.BuilderTypeCommon<Phaser.GameObjects.Sprite>,
49
- Video: Builders.BuilderTypeCommon<Phaser.GameObjects.Video>,
50
- Text: Builders.BuilderTypeCommon<Phaser.GameObjects.Text>,
51
- BBCodeText: Builders.BuilderTypeCommon<BBCodeText>,
52
- RoundRectangle: Builders.BuilderTypeCommon<RoundRectangle>,
53
- Ninepatch: Builders.BuilderTypeCommon<NinePatch>,
54
- Ninepatch2: Builders.BuilderTypeCommon<NinePatch2>,
55
- Canvas: Builders.BuilderTypeCommon<Canvas>,
56
- CircleMaskImage: Builders.BuilderTypeCommon<CircleMaskImage>,
57
- Space: Builders.BuilderTypeCommon<Space>,
58
-
59
- Sizer: Builders.BuilderTypeCommon<Sizer>,
60
- FixWidthSizer: Builders.BuilderTypeCommon<FixWidthSizer>,
61
- GridSizer: Builders.BuilderTypeCommon<GridSizer>,
62
- OverlapSizer: Builders.BuilderTypeCommon<OverlapSizer>,
63
-
64
- Buttons: Builders.BuilderTypeCommon<Buttons>,
65
- FixWidthButtons: Builders.BuilderTypeCommon<FixWidthButtons>,
66
- GridButtons: Builders.BuilderTypeCommon<GridButtons>,
67
-
68
- Label: Builders.BuilderTypeCommon<Label>,
69
- BadgeLabel: Builders.BuilderTypeCommon<BadgeLabel>,
70
- Dialog: Builders.BuilderTypeCommon<Dialog>,
71
- TextBox: Builders.BuilderTypeCommon<TextBox>,
72
- Slider: Builders.BuilderTypeCommon<Slider>,
73
- NumberBar: Builders.BuilderTypeCommon<NumberBar>,
74
- ScrollBar: Builders.BuilderTypeCommon<ScrollBar>,
75
- TextArea: Builders.BuilderTypeCommon<TextArea>,
76
- Pages: Builders.BuilderTypeCommon<Pages>,
77
- Toast: Builders.BuilderTypeCommon<Toast>,
78
- Knob: Builders.BuilderTypeCommon<Knob>,
79
- HolyGrail: Builders.BuilderTypeCommon<HolyGrail>,
80
- Menu: Builders.BuilderTypeCommon<Menu>,
81
-
82
- }
spaces/AlekseyKorshuk/thin-plate-spline-motion-model/modules/dense_motion.py DELETED
@@ -1,164 +0,0 @@
1
- from torch import nn
2
- import torch.nn.functional as F
3
- import torch
4
- from modules.util import Hourglass, AntiAliasInterpolation2d, make_coordinate_grid, kp2gaussian
5
- from modules.util import to_homogeneous, from_homogeneous, UpBlock2d, TPS
6
- import math
7
-
8
- class DenseMotionNetwork(nn.Module):
9
- """
10
- Module that estimates an optical flow and multi-resolution occlusion masks
11
- from K TPS transformations and an affine transformation.
12
- """
13
-
14
- def __init__(self, block_expansion, num_blocks, max_features, num_tps, num_channels,
15
- scale_factor=0.25, bg = False, multi_mask = True, kp_variance=0.01):
16
- super(DenseMotionNetwork, self).__init__()
17
-
18
- if scale_factor != 1:
19
- self.down = AntiAliasInterpolation2d(num_channels, scale_factor)
20
- self.scale_factor = scale_factor
21
- self.multi_mask = multi_mask
22
-
23
- self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_channels * (num_tps+1) + num_tps*5+1),
24
- max_features=max_features, num_blocks=num_blocks)
25
-
26
- hourglass_output_size = self.hourglass.out_channels
27
- self.maps = nn.Conv2d(hourglass_output_size[-1], num_tps + 1, kernel_size=(7, 7), padding=(3, 3))
28
-
29
- if multi_mask:
30
- up = []
31
- self.up_nums = int(math.log(1/scale_factor, 2))
32
- self.occlusion_num = 4
33
-
34
- channel = [hourglass_output_size[-1]//(2**i) for i in range(self.up_nums)]
35
- for i in range(self.up_nums):
36
- up.append(UpBlock2d(channel[i], channel[i]//2, kernel_size=3, padding=1))
37
- self.up = nn.ModuleList(up)
38
-
39
- channel = [hourglass_output_size[-i-1] for i in range(self.occlusion_num-self.up_nums)[::-1]]
40
- for i in range(self.up_nums):
41
- channel.append(hourglass_output_size[-1]//(2**(i+1)))
42
- occlusion = []
43
-
44
- for i in range(self.occlusion_num):
45
- occlusion.append(nn.Conv2d(channel[i], 1, kernel_size=(7, 7), padding=(3, 3)))
46
- self.occlusion = nn.ModuleList(occlusion)
47
- else:
48
- occlusion = [nn.Conv2d(hourglass_output_size[-1], 1, kernel_size=(7, 7), padding=(3, 3))]
49
- self.occlusion = nn.ModuleList(occlusion)
50
-
51
- self.num_tps = num_tps
52
- self.bg = bg
53
- self.kp_variance = kp_variance
54
-
55
-
56
- def create_heatmap_representations(self, source_image, kp_driving, kp_source):
57
-
58
- spatial_size = source_image.shape[2:]
59
- gaussian_driving = kp2gaussian(kp_driving['fg_kp'], spatial_size=spatial_size, kp_variance=self.kp_variance)
60
- gaussian_source = kp2gaussian(kp_source['fg_kp'], spatial_size=spatial_size, kp_variance=self.kp_variance)
61
- heatmap = gaussian_driving - gaussian_source
62
-
63
- zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1]).type(heatmap.type()).to(heatmap.device)
64
- heatmap = torch.cat([zeros, heatmap], dim=1)
65
-
66
- return heatmap
67
-
68
- def create_transformations(self, source_image, kp_driving, kp_source, bg_param):
69
- # K TPS transformations
70
- bs, _, h, w = source_image.shape
71
- kp_1 = kp_driving['fg_kp']
72
- kp_2 = kp_source['fg_kp']
73
- kp_1 = kp_1.view(bs, -1, 5, 2)
74
- kp_2 = kp_2.view(bs, -1, 5, 2)
75
- trans = TPS(mode = 'kp', bs = bs, kp_1 = kp_1, kp_2 = kp_2)
76
- driving_to_source = trans.transform_frame(source_image)
77
-
78
- identity_grid = make_coordinate_grid((h, w), type=kp_1.type()).to(kp_1.device)
79
- identity_grid = identity_grid.view(1, 1, h, w, 2)
80
- identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1)
81
-
82
- # affine background transformation
83
- if not (bg_param is None):
84
- identity_grid = to_homogeneous(identity_grid)
85
- identity_grid = torch.matmul(bg_param.view(bs, 1, 1, 1, 3, 3), identity_grid.unsqueeze(-1)).squeeze(-1)
86
- identity_grid = from_homogeneous(identity_grid)
87
-
88
- transformations = torch.cat([identity_grid, driving_to_source], dim=1)
89
- return transformations
90
-
91
- def create_deformed_source_image(self, source_image, transformations):
92
-
93
- bs, _, h, w = source_image.shape
94
- source_repeat = source_image.unsqueeze(1).unsqueeze(1).repeat(1, self.num_tps + 1, 1, 1, 1, 1)
95
- source_repeat = source_repeat.view(bs * (self.num_tps + 1), -1, h, w)
96
- transformations = transformations.view((bs * (self.num_tps + 1), h, w, -1))
97
- deformed = F.grid_sample(source_repeat, transformations, align_corners=True)
98
- deformed = deformed.view((bs, self.num_tps+1, -1, h, w))
99
- return deformed
100
-
101
- def dropout_softmax(self, X, P):
102
- '''
103
- Dropout for TPS transformations. Eq(7) and Eq(8) in the paper.
104
- '''
105
- drop = (torch.rand(X.shape[0],X.shape[1]) < (1-P)).type(X.type()).to(X.device)
106
- drop[..., 0] = 1
107
- drop = drop.repeat(X.shape[2],X.shape[3],1,1).permute(2,3,0,1)
108
-
109
- maxx = X.max(1).values.unsqueeze_(1)
110
- X = X - maxx
111
- X_exp = X.exp()
112
- X[:,1:,...] /= (1-P)
113
- mask_bool =(drop == 0)
114
- X_exp = X_exp.masked_fill(mask_bool, 0)
115
- partition = X_exp.sum(dim=1, keepdim=True) + 1e-6
116
- return X_exp / partition
117
-
118
- def forward(self, source_image, kp_driving, kp_source, bg_param = None, dropout_flag=False, dropout_p = 0):
119
- if self.scale_factor != 1:
120
- source_image = self.down(source_image)
121
-
122
- bs, _, h, w = source_image.shape
123
-
124
- out_dict = dict()
125
- heatmap_representation = self.create_heatmap_representations(source_image, kp_driving, kp_source)
126
- transformations = self.create_transformations(source_image, kp_driving, kp_source, bg_param)
127
- deformed_source = self.create_deformed_source_image(source_image, transformations)
128
- out_dict['deformed_source'] = deformed_source
129
- # out_dict['transformations'] = transformations
130
- deformed_source = deformed_source.view(bs,-1,h,w)
131
- input = torch.cat([heatmap_representation, deformed_source], dim=1)
132
- input = input.view(bs, -1, h, w)
133
-
134
- prediction = self.hourglass(input, mode = 1)
135
-
136
- contribution_maps = self.maps(prediction[-1])
137
- if(dropout_flag):
138
- contribution_maps = self.dropout_softmax(contribution_maps, dropout_p)
139
- else:
140
- contribution_maps = F.softmax(contribution_maps, dim=1)
141
- out_dict['contribution_maps'] = contribution_maps
142
-
143
- # Combine the K+1 transformations
144
- # Eq(6) in the paper
145
- contribution_maps = contribution_maps.unsqueeze(2)
146
- transformations = transformations.permute(0, 1, 4, 2, 3)
147
- deformation = (transformations * contribution_maps).sum(dim=1)
148
- deformation = deformation.permute(0, 2, 3, 1)
149
-
150
- out_dict['deformation'] = deformation # Optical Flow
151
-
152
- occlusion_map = []
153
- if self.multi_mask:
154
- for i in range(self.occlusion_num-self.up_nums):
155
- occlusion_map.append(torch.sigmoid(self.occlusion[i](prediction[self.up_nums-self.occlusion_num+i])))
156
- prediction = prediction[-1]
157
- for i in range(self.up_nums):
158
- prediction = self.up[i](prediction)
159
- occlusion_map.append(torch.sigmoid(self.occlusion[i+self.occlusion_num-self.up_nums](prediction)))
160
- else:
161
- occlusion_map.append(torch.sigmoid(self.occlusion[0](prediction[-1])))
162
-
163
- out_dict['occlusion_map'] = occlusion_map # Multi-resolution Occlusion Masks
164
- return out_dict
spaces/AlexWang/lama/saicinpainting/training/data/masks.py DELETED
@@ -1,332 +0,0 @@
- import math
- import random
- import hashlib
- import logging
- from enum import Enum
-
- import cv2
- import numpy as np
-
- from saicinpainting.evaluation.masks.mask import SegmentationMask
- from saicinpainting.utils import LinearRamp
-
- LOGGER = logging.getLogger(__name__)
-
-
- class DrawMethod(Enum):
-     LINE = 'line'
-     CIRCLE = 'circle'
-     SQUARE = 'square'
-
-
- def make_random_irregular_mask(shape, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10,
-                                draw_method=DrawMethod.LINE):
-     draw_method = DrawMethod(draw_method)
-
-     height, width = shape
-     mask = np.zeros((height, width), np.float32)
-     times = np.random.randint(min_times, max_times + 1)
-     for i in range(times):
-         start_x = np.random.randint(width)
-         start_y = np.random.randint(height)
-         for j in range(1 + np.random.randint(5)):
-             angle = 0.01 + np.random.randint(max_angle)
-             if i % 2 == 0:
-                 angle = 2 * 3.1415926 - angle
-             length = 10 + np.random.randint(max_len)
-             brush_w = 5 + np.random.randint(max_width)
-             end_x = np.clip((start_x + length * np.sin(angle)).astype(np.int32), 0, width)
-             end_y = np.clip((start_y + length * np.cos(angle)).astype(np.int32), 0, height)
-             if draw_method == DrawMethod.LINE:
-                 cv2.line(mask, (start_x, start_y), (end_x, end_y), 1.0, brush_w)
-             elif draw_method == DrawMethod.CIRCLE:
-                 cv2.circle(mask, (start_x, start_y), radius=brush_w, color=1., thickness=-1)
-             elif draw_method == DrawMethod.SQUARE:
-                 radius = brush_w // 2
-                 mask[start_y - radius:start_y + radius, start_x - radius:start_x + radius] = 1
-             start_x, start_y = end_x, end_y
-     return mask[None, ...]
-
-
- class RandomIrregularMaskGenerator:
-     def __init__(self, max_angle=4, max_len=60, max_width=20, min_times=0, max_times=10, ramp_kwargs=None,
-                  draw_method=DrawMethod.LINE):
-         self.max_angle = max_angle
-         self.max_len = max_len
-         self.max_width = max_width
-         self.min_times = min_times
-         self.max_times = max_times
-         self.draw_method = draw_method
-         self.ramp = LinearRamp(**ramp_kwargs) if ramp_kwargs is not None else None
-
-     def __call__(self, img, iter_i=None, raw_image=None):
-         coef = self.ramp(iter_i) if (self.ramp is not None) and (iter_i is not None) else 1
-         cur_max_len = int(max(1, self.max_len * coef))
-         cur_max_width = int(max(1, self.max_width * coef))
-         cur_max_times = int(self.min_times + 1 + (self.max_times - self.min_times) * coef)
-         return make_random_irregular_mask(img.shape[1:], max_angle=self.max_angle, max_len=cur_max_len,
-                                           max_width=cur_max_width, min_times=self.min_times, max_times=cur_max_times,
-                                           draw_method=self.draw_method)
-
-
- def make_random_rectangle_mask(shape, margin=10, bbox_min_size=30, bbox_max_size=100, min_times=0, max_times=3):
-     height, width = shape
-     mask = np.zeros((height, width), np.float32)
-     bbox_max_size = min(bbox_max_size, height - margin * 2, width - margin * 2)
-     times = np.random.randint(min_times, max_times + 1)
-     for i in range(times):
-         box_width = np.random.randint(bbox_min_size, bbox_max_size)
-         box_height = np.random.randint(bbox_min_size, bbox_max_size)
-         start_x = np.random.randint(margin, width - margin - box_width + 1)
-         start_y = np.random.randint(margin, height - margin - box_height + 1)
-         mask[start_y:start_y + box_height, start_x:start_x + box_width] = 1
-     return mask[None, ...]
-
-
- class RandomRectangleMaskGenerator:
-     def __init__(self, margin=10, bbox_min_size=30, bbox_max_size=100, min_times=0, max_times=3, ramp_kwargs=None):
-         self.margin = margin
-         self.bbox_min_size = bbox_min_size
-         self.bbox_max_size = bbox_max_size
-         self.min_times = min_times
-         self.max_times = max_times
-         self.ramp = LinearRamp(**ramp_kwargs) if ramp_kwargs is not None else None
-
-     def __call__(self, img, iter_i=None, raw_image=None):
-         coef = self.ramp(iter_i) if (self.ramp is not None) and (iter_i is not None) else 1
-         cur_bbox_max_size = int(self.bbox_min_size + 1 + (self.bbox_max_size - self.bbox_min_size) * coef)
-         cur_max_times = int(self.min_times + (self.max_times - self.min_times) * coef)
-         return make_random_rectangle_mask(img.shape[1:], margin=self.margin, bbox_min_size=self.bbox_min_size,
-                                           bbox_max_size=cur_bbox_max_size, min_times=self.min_times,
-                                           max_times=cur_max_times)
-
-
- class RandomSegmentationMaskGenerator:
-     def __init__(self, **kwargs):
-         self.impl = None  # will be instantiated in first call (effectively in subprocess)
-         self.kwargs = kwargs
-
-     def __call__(self, img, iter_i=None, raw_image=None):
-         if self.impl is None:
-             self.impl = SegmentationMask(**self.kwargs)
-
-         masks = self.impl.get_masks(np.transpose(img, (1, 2, 0)))
-         masks = [m for m in masks if len(np.unique(m)) > 1]
-         return np.random.choice(masks)
-
-
- def make_random_superres_mask(shape, min_step=2, max_step=4, min_width=1, max_width=3):
-     height, width = shape
-     mask = np.zeros((height, width), np.float32)
-     step_x = np.random.randint(min_step, max_step + 1)
-     width_x = np.random.randint(min_width, min(step_x, max_width + 1))
-     offset_x = np.random.randint(0, step_x)
-
-     step_y = np.random.randint(min_step, max_step + 1)
-     width_y = np.random.randint(min_width, min(step_y, max_width + 1))
-     offset_y = np.random.randint(0, step_y)
-
-     for dy in range(width_y):
-         mask[offset_y + dy::step_y] = 1
-     for dx in range(width_x):
-         mask[:, offset_x + dx::step_x] = 1
-     return mask[None, ...]
-
-
- class RandomSuperresMaskGenerator:
-     def __init__(self, **kwargs):
-         self.kwargs = kwargs
-
-     def __call__(self, img, iter_i=None):
-         return make_random_superres_mask(img.shape[1:], **self.kwargs)
-
-
- class DumbAreaMaskGenerator:
-     min_ratio = 0.1
-     max_ratio = 0.35
-     default_ratio = 0.225
-
-     def __init__(self, is_training):
-         # Parameters:
-         #     is_training (bool): if True - random rectangular mask, if False - central square mask
-         self.is_training = is_training
-
-     def _random_vector(self, dimension):
-         if self.is_training:
-             lower_limit = math.sqrt(self.min_ratio)
-             upper_limit = math.sqrt(self.max_ratio)
-             mask_side = round((random.random() * (upper_limit - lower_limit) + lower_limit) * dimension)
-             u = random.randint(0, dimension - mask_side - 1)
-             v = u + mask_side
-         else:
-             margin = (math.sqrt(self.default_ratio) / 2) * dimension
-             u = round(dimension / 2 - margin)
-             v = round(dimension / 2 + margin)
-         return u, v
-
-     def __call__(self, img, iter_i=None, raw_image=None):
-         c, height, width = img.shape
-         mask = np.zeros((height, width), np.float32)
-         x1, x2 = self._random_vector(width)
-         y1, y2 = self._random_vector(height)
-         mask[x1:x2, y1:y2] = 1
-         return mask[None, ...]
-
-
- class OutpaintingMaskGenerator:
-     def __init__(self, min_padding_percent: float = 0.04, max_padding_percent: float = 0.25, left_padding_prob: float = 0.5, top_padding_prob: float = 0.5,
-                  right_padding_prob: float = 0.5, bottom_padding_prob: float = 0.5, is_fixed_randomness: bool = False):
-         """
-         is_fixed_randomness - get identical paddings for the same image if args are the same
-         """
-         self.min_padding_percent = min_padding_percent
-         self.max_padding_percent = max_padding_percent
-         self.probs = [left_padding_prob, top_padding_prob, right_padding_prob, bottom_padding_prob]
-         self.is_fixed_randomness = is_fixed_randomness
-
-         assert self.min_padding_percent <= self.max_padding_percent
-         assert self.max_padding_percent > 0
-         assert len([x for x in [self.min_padding_percent, self.max_padding_percent] if (x >= 0 and x <= 1)]) == 2, "Padding percentage should be in [0,1]"
-         assert sum(self.probs) > 0, f"At least one of the padding probs should be greater than 0 - {self.probs}"
-         assert len([x for x in self.probs if (x >= 0) and (x <= 1)]) == 4, f"At least one of padding probs is not in [0,1] - {self.probs}"
-         if len([x for x in self.probs if x > 0]) == 1:
-             LOGGER.warning(f"Only one padding prob is greater than zero - {self.probs}. That means that the outpainting masks will always be on the same side")
-
-     def apply_padding(self, mask, coord):
-         mask[int(coord[0][0] * self.img_h):int(coord[1][0] * self.img_h),
-              int(coord[0][1] * self.img_w):int(coord[1][1] * self.img_w)] = 1
-         return mask
-
-     def get_padding(self, size):
-         n1 = int(self.min_padding_percent * size)
-         n2 = int(self.max_padding_percent * size)
-         return self.rnd.randint(n1, n2) / size
-
-     @staticmethod
-     def _img2rs(img):
-         arr = np.ascontiguousarray(img.astype(np.uint8))
-         str_hash = hashlib.sha1(arr).hexdigest()
-         res = hash(str_hash) % (2 ** 32)
-         return res
-
-     def __call__(self, img, iter_i=None, raw_image=None):
-         c, self.img_h, self.img_w = img.shape
-         mask = np.zeros((self.img_h, self.img_w), np.float32)
-         at_least_one_mask_applied = False
-
-         if self.is_fixed_randomness:
-             assert raw_image is not None, "Can't calculate hash on raw_image=None"
-             rs = self._img2rs(raw_image)
-             self.rnd = np.random.RandomState(rs)
-         else:
-             self.rnd = np.random
-
-         coords = [[
-                    (0, 0),
-                    (1, self.get_padding(size=self.img_h))
-                   ],
-                   [
-                    (0, 0),
-                    (self.get_padding(size=self.img_w), 1)
-                   ],
-                   [
-                    (0, 1 - self.get_padding(size=self.img_h)),
-                    (1, 1)
-                   ],
-                   [
-                    (1 - self.get_padding(size=self.img_w), 0),
-                    (1, 1)
-                   ]]
-
-         for pp, coord in zip(self.probs, coords):
-             if self.rnd.random() < pp:
-                 at_least_one_mask_applied = True
-                 mask = self.apply_padding(mask=mask, coord=coord)
-
-         if not at_least_one_mask_applied:
-             idx = self.rnd.choice(range(len(coords)), p=np.array(self.probs) / sum(self.probs))
-             mask = self.apply_padding(mask=mask, coord=coords[idx])
-         return mask[None, ...]
-
-
- class MixedMaskGenerator:
-     def __init__(self, irregular_proba=1/3, irregular_kwargs=None,
-                  box_proba=1/3, box_kwargs=None,
-                  segm_proba=1/3, segm_kwargs=None,
-                  squares_proba=0, squares_kwargs=None,
-                  superres_proba=0, superres_kwargs=None,
-                  outpainting_proba=0, outpainting_kwargs=None,
-                  invert_proba=0):
-         self.probas = []
-         self.gens = []
-
-         if irregular_proba > 0:
-             self.probas.append(irregular_proba)
-             if irregular_kwargs is None:
-                 irregular_kwargs = {}
-             else:
-                 irregular_kwargs = dict(irregular_kwargs)
-             irregular_kwargs['draw_method'] = DrawMethod.LINE
-             self.gens.append(RandomIrregularMaskGenerator(**irregular_kwargs))
-
-         if box_proba > 0:
-             self.probas.append(box_proba)
-             if box_kwargs is None:
-                 box_kwargs = {}
-             self.gens.append(RandomRectangleMaskGenerator(**box_kwargs))
-
-         if segm_proba > 0:
-             self.probas.append(segm_proba)
-             if segm_kwargs is None:
-                 segm_kwargs = {}
-             self.gens.append(RandomSegmentationMaskGenerator(**segm_kwargs))
-
-         if squares_proba > 0:
-             self.probas.append(squares_proba)
-             if squares_kwargs is None:
-                 squares_kwargs = {}
-             else:
-                 squares_kwargs = dict(squares_kwargs)
-             squares_kwargs['draw_method'] = DrawMethod.SQUARE
-             self.gens.append(RandomIrregularMaskGenerator(**squares_kwargs))
-
-         if superres_proba > 0:
-             self.probas.append(superres_proba)
-             if superres_kwargs is None:
-                 superres_kwargs = {}
-             self.gens.append(RandomSuperresMaskGenerator(**superres_kwargs))
-
-         if outpainting_proba > 0:
-             self.probas.append(outpainting_proba)
-             if outpainting_kwargs is None:
-                 outpainting_kwargs = {}
-             self.gens.append(OutpaintingMaskGenerator(**outpainting_kwargs))
-
-         self.probas = np.array(self.probas, dtype='float32')
-         self.probas /= self.probas.sum()
-         self.invert_proba = invert_proba
-
-     def __call__(self, img, iter_i=None, raw_image=None):
-         kind = np.random.choice(len(self.probas), p=self.probas)
-         gen = self.gens[kind]
-         result = gen(img, iter_i=iter_i, raw_image=raw_image)
-         if self.invert_proba > 0 and random.random() < self.invert_proba:
-             result = 1 - result
-         return result
-
-
- def get_mask_generator(kind, kwargs):
-     if kind is None:
-         kind = "mixed"
-     if kwargs is None:
-         kwargs = {}
-
-     if kind == "mixed":
-         cl = MixedMaskGenerator
-     elif kind == "outpainting":
-         cl = OutpaintingMaskGenerator
-     elif kind == "dumb":
-         cl = DumbAreaMaskGenerator
-     else:
-         raise NotImplementedError(f"No such generator kind = {kind}")
-     return cl(**kwargs)
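
A hedged usage sketch for the deleted mask generators above; the import path mirrors this file's location, and the image shape and probabilities are illustrative assumptions:

    import numpy as np
    from saicinpainting.training.data.masks import MixedMaskGenerator

    # segm_proba=0 avoids the SegmentationMask dependency; the remaining probs are renormalized.
    gen = MixedMaskGenerator(irregular_proba=0.5, box_proba=0.5, segm_proba=0)
    img = np.zeros((3, 256, 256), dtype=np.float32)   # dummy CHW image
    mask = gen(img, iter_i=0)                         # float32 array of shape (1, 256, 256), values in {0, 1}
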
spaces/AlexZou/Deploy_Restoration/net/PositionalEncoding.py DELETED
@@ -1,35 +0,0 @@
- import torch
- import torch.nn as nn
-
-
- # Implements positional encoding
- class FixedPositionalEncoding(nn.Module):
-     def __init__(self, embedding_dim, max_length=512):
-         super(FixedPositionalEncoding, self).__init__()
-
-         pe = torch.zeros(max_length, embedding_dim)
-         position = torch.arange(0, max_length, dtype=torch.float).unsqueeze(1)
-         div_term = torch.exp(
-             torch.arange(0, embedding_dim, 2).float()
-             * (-torch.log(torch.tensor(10000.0)) / embedding_dim)
-         )
-         pe[:, 0::2] = torch.sin(position * div_term)
-         pe[:, 1::2] = torch.cos(position * div_term)
-         pe = pe.unsqueeze(0).transpose(0, 1)
-         self.register_buffer('pe', pe)
-
-     def forward(self, x):
-         x = x + self.pe[: x.size(0), :]
-         return x
-
-
- class LearnedPositionalEncoding(nn.Module):
-     def __init__(self, max_position_embeddings, embedding_dim, seq_length):
-         super(LearnedPositionalEncoding, self).__init__()
-
-         self.position_embeddings = nn.Parameter(torch.zeros(1, 256, 512))  # 8x
-
-     def forward(self, x, position_ids=None):
-         position_embeddings = self.position_embeddings
-         return x + position_embeddings
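
FixedPositionalEncoding above is the standard sinusoidal scheme, PE(pos, 2i) = sin(pos / 10000^(2i/d)) and PE(pos, 2i+1) = cos(pos / 10000^(2i/d)), stored as a (max_length, 1, dim) buffer that broadcasts over the batch dimension. A minimal smoke test (tensor sizes are illustrative):

    import torch

    pe = FixedPositionalEncoding(embedding_dim=512, max_length=512)
    x = torch.zeros(10, 2, 512)   # (seq_len, batch, dim); the (L, 1, D) buffer broadcasts over batch
    y = pe(x)
    assert y.shape == x.shape
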
spaces/Andres99/Tune-A-Video-Training-UI/constants.py DELETED
@@ -1,10 +0,0 @@
- import enum
-
-
- class UploadTarget(enum.Enum):
-     PERSONAL_PROFILE = 'Personal Profile'
-     MODEL_LIBRARY = 'Tune-A-Video Library'
-
-
- MODEL_LIBRARY_ORG_NAME = 'Tune-A-Video-library'
- SAMPLE_MODEL_REPO = 'Tune-A-Video-library/a-man-is-surfing'
spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py DELETED
@@ -1,75 +0,0 @@
- _base_ = [
-     '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
-     '../_base_/default_runtime.py'
- ]
- model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
-
- CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
-            'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
-            'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
-
- # dataset settings
- dataset_type = 'CocoDataset'
- data_root = 'data/VOCdevkit/'
- img_norm_cfg = dict(
-     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1000, 600),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     samples_per_gpu=2,
-     workers_per_gpu=2,
-     train=dict(
-         type='RepeatDataset',
-         times=3,
-         dataset=dict(
-             type=dataset_type,
-             ann_file='data/voc0712_trainval.json',
-             img_prefix='data/VOCdevkit',
-             pipeline=train_pipeline,
-             classes=CLASSES)),
-     val=dict(
-         type=dataset_type,
-         ann_file='data/voc07_test.json',
-         img_prefix='data/VOCdevkit',
-         pipeline=test_pipeline,
-         classes=CLASSES),
-     test=dict(
-         type=dataset_type,
-         ann_file='data/voc07_test.json',
-         img_prefix='data/VOCdevkit',
-         pipeline=test_pipeline,
-         classes=CLASSES))
- evaluation = dict(interval=1, metric='bbox')
-
- # optimizer
- optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
- optimizer_config = dict(grad_clip=None)
- # learning policy
- # actual epoch = 3 * 3 = 9
- lr_config = dict(policy='step', step=[3])
- # runtime settings
- runner = dict(
-     type='EpochBasedRunner', max_epochs=4)  # actual epoch = 4 * 3 = 12
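
A hedged sketch of how an MMDetection config like the one above is typically loaded and inspected; this assumes a pre-2.0 mmcv with the Config API and the repo-relative path shown:

    from mmcv import Config

    cfg = Config.fromfile('configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py')
    print(cfg.data.train.dataset.ann_file)   # 'data/voc0712_trainval.json'
    print(cfg.runner.max_epochs)             # 4 (x3 RepeatDataset = 12 effective epochs)
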
spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './nonlocal_r50-d8_512x512_20k_voc12aug.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Audio-AGI/WavJourney/scripts/start_ui.sh DELETED
@@ -1 +0,0 @@
- conda run --live-stream -n WavJourney python -u ui_client.py 2>&1 | stdbuf -oL tee services_logs/wavejourney.out
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis.py DELETED
@@ -1,240 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import logging
- import os
- from fvcore.common.timer import Timer
-
- from detectron2.data import DatasetCatalog, MetadataCatalog
- from detectron2.structures import BoxMode
- from detectron2.utils.file_io import PathManager
-
- from .builtin_meta import _get_coco_instances_meta
- from .lvis_v0_5_categories import LVIS_CATEGORIES as LVIS_V0_5_CATEGORIES
- from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES
-
- """
- This file contains functions to parse LVIS-format annotations into dicts in the
- "Detectron2 format".
- """
-
- logger = logging.getLogger(__name__)
-
- __all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"]
-
-
- def register_lvis_instances(name, metadata, json_file, image_root):
-     """
-     Register a dataset in LVIS's json annotation format for instance detection and segmentation.
-
-     Args:
-         name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
-         metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
-         json_file (str): path to the json instance annotation file.
-         image_root (str or path-like): directory which contains all the images.
-     """
-     DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
-     MetadataCatalog.get(name).set(
-         json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
-     )
-
-
- def load_lvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
-     """
-     Load a json file in LVIS's annotation format.
-
-     Args:
-         json_file (str): full path to the LVIS json annotation file.
-         image_root (str): the directory where the images in this json file exist.
-         dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
-             If provided, this function will put "thing_classes" into the metadata
-             associated with this dataset.
-         extra_annotation_keys (list[str]): list of per-annotation keys that should also be
-             loaded into the dataset dict (besides "bbox", "bbox_mode", "category_id",
-             "segmentation"). The values for these keys will be returned as-is.
-
-     Returns:
-         list[dict]: a list of dicts in Detectron2 standard format. (See
-         `Using Custom Datasets </tutorials/datasets.html>`_ )
-
-     Notes:
-         1. This function does not read the image files.
-            The results do not have the "image" field.
-     """
-     from lvis import LVIS
-
-     json_file = PathManager.get_local_path(json_file)
-
-     timer = Timer()
-     lvis_api = LVIS(json_file)
-     if timer.seconds() > 1:
-         logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
-
-     if dataset_name is not None:
-         meta = get_lvis_instances_meta(dataset_name)
-         MetadataCatalog.get(dataset_name).set(**meta)
-
-     # sort indices for reproducible results
-     img_ids = sorted(lvis_api.imgs.keys())
-     # imgs is a list of dicts, each looks something like:
-     # {'license': 4,
-     #  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
-     #  'file_name': 'COCO_val2014_000000001268.jpg',
-     #  'height': 427,
-     #  'width': 640,
-     #  'date_captured': '2013-11-17 05:57:24',
-     #  'id': 1268}
-     imgs = lvis_api.load_imgs(img_ids)
-     # anns is a list[list[dict]], where each dict is an annotation
-     # record for an object. The inner list enumerates the objects in an image
-     # and the outer list enumerates over images. Example of anns[0]:
-     # [{'segmentation': [[192.81,
-     #     247.09,
-     #     ...
-     #     219.03,
-     #     249.06]],
-     #   'area': 1035.749,
-     #   'image_id': 1268,
-     #   'bbox': [192.81, 224.8, 74.73, 33.43],
-     #   'category_id': 16,
-     #   'id': 42986},
-     #  ...]
-     anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
-
-     # Sanity check that each annotation has a unique id
-     ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
-     assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format(
-         json_file
-     )
-
-     imgs_anns = list(zip(imgs, anns))
-
-     logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file))
-
-     if extra_annotation_keys:
-         logger.info(
-             "The following extra annotation keys will be loaded: {} ".format(extra_annotation_keys)
-         )
-     else:
-         extra_annotation_keys = []
-
-     def get_file_name(img_root, img_dict):
-         # Determine the path including the split folder ("train2017", "val2017", "test2017") from
-         # the coco_url field. Example:
-         #   'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
-         split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
-         return os.path.join(img_root + split_folder, file_name)
-
-     dataset_dicts = []
-
-     for (img_dict, anno_dict_list) in imgs_anns:
-         record = {}
-         record["file_name"] = get_file_name(image_root, img_dict)
-         record["height"] = img_dict["height"]
-         record["width"] = img_dict["width"]
-         record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
-         record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
-         image_id = record["image_id"] = img_dict["id"]
-
-         objs = []
-         for anno in anno_dict_list:
-             # Check that the image_id in this annotation is the same as
-             # the image_id we're looking at.
-             # This fails only when the data parsing logic or the annotation file is buggy.
-             assert anno["image_id"] == image_id
-             obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
-             # LVIS data loader can be used to load COCO dataset categories. In this case `meta`
-             # variable will have a field with COCO-specific category mapping.
-             if dataset_name is not None and "thing_dataset_id_to_contiguous_id" in meta:
-                 obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][anno["category_id"]]
-             else:
-                 obj["category_id"] = anno["category_id"] - 1  # Convert 1-indexed to 0-indexed
-             segm = anno["segmentation"]  # list[list[float]]
-             # filter out invalid polygons (< 3 points)
-             valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
-             assert len(segm) == len(
-                 valid_segm
-             ), "Annotation contains an invalid polygon with < 3 points"
-             assert len(segm) > 0
-             obj["segmentation"] = segm
-             for extra_ann_key in extra_annotation_keys:
-                 obj[extra_ann_key] = anno[extra_ann_key]
-             objs.append(obj)
-         record["annotations"] = objs
-         dataset_dicts.append(record)
-
-     return dataset_dicts
-
-
- def get_lvis_instances_meta(dataset_name):
-     """
-     Load LVIS metadata.
-
-     Args:
-         dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5").
-
-     Returns:
-         dict: LVIS metadata with keys: thing_classes
-     """
-     if "cocofied" in dataset_name:
-         return _get_coco_instances_meta()
-     if "v0.5" in dataset_name:
-         return _get_lvis_instances_meta_v0_5()
-     elif "v1" in dataset_name:
-         return _get_lvis_instances_meta_v1()
-     raise ValueError("No built-in metadata for dataset {}".format(dataset_name))
-
-
- def _get_lvis_instances_meta_v0_5():
-     assert len(LVIS_V0_5_CATEGORIES) == 1230
-     cat_ids = [k["id"] for k in LVIS_V0_5_CATEGORIES]
-     assert min(cat_ids) == 1 and max(cat_ids) == len(
-         cat_ids
-     ), "Category ids are not in [1, #categories], as expected"
-     # Ensure that the category list is sorted by id
-     lvis_categories = sorted(LVIS_V0_5_CATEGORIES, key=lambda x: x["id"])
-     thing_classes = [k["synonyms"][0] for k in lvis_categories]
-     meta = {"thing_classes": thing_classes}
-     return meta
-
-
- def _get_lvis_instances_meta_v1():
-     assert len(LVIS_V1_CATEGORIES) == 1203
-     cat_ids = [k["id"] for k in LVIS_V1_CATEGORIES]
-     assert min(cat_ids) == 1 and max(cat_ids) == len(
-         cat_ids
-     ), "Category ids are not in [1, #categories], as expected"
-     # Ensure that the category list is sorted by id
-     lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x["id"])
-     thing_classes = [k["synonyms"][0] for k in lvis_categories]
-     meta = {"thing_classes": thing_classes}
-     return meta
-
-
- if __name__ == "__main__":
-     """
-     Test the LVIS json dataset loader.
-
-     Usage:
-         python -m detectron2.data.datasets.lvis \
-             path/to/json path/to/image_root dataset_name vis_limit
-     """
-     import sys
-     import numpy as np
-     from detectron2.utils.logger import setup_logger
-     from PIL import Image
-     import detectron2.data.datasets  # noqa # add pre-defined metadata
-     from detectron2.utils.visualizer import Visualizer
-
-     logger = setup_logger(name=__name__)
-     meta = MetadataCatalog.get(sys.argv[3])
-
-     dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3])
-     logger.info("Done loading {} samples.".format(len(dicts)))
-
-     dirname = "lvis-data-vis"
-     os.makedirs(dirname, exist_ok=True)
-     for d in dicts[: int(sys.argv[4])]:
-         img = np.array(Image.open(d["file_name"]))
-         visualizer = Visualizer(img, metadata=meta)
-         vis = visualizer.draw_dataset_dict(d)
-         fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
-         vis.save(fpath)
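
A minimal sketch of registering and loading an LVIS-format dataset with the helpers above; the dataset name and file paths are illustrative assumptions:

    from detectron2.data import DatasetCatalog
    from detectron2.data.datasets.lvis import register_lvis_instances, get_lvis_instances_meta

    name = "lvis_v1_custom_train"   # hypothetical dataset name containing "v1"
    register_lvis_instances(name, get_lvis_instances_meta("lvis_v1"),
                            "data/lvis_v1_train.json", "data/coco/")
    dicts = DatasetCatalog.get(name)   # lazily calls load_lvis_json
    print(len(dicts), dicts[0]["file_name"])
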
spaces/AzinZ/vitscn/data_utils.py DELETED
@@ -1,392 +0,0 @@
- import time
- import os
- import random
- import numpy as np
- import torch
- import torch.utils.data
-
- import commons
- from mel_processing import spectrogram_torch
- from utils import load_wav_to_torch, load_filepaths_and_text
- from text import text_to_sequence, cleaned_text_to_sequence
-
-
- class TextAudioLoader(torch.utils.data.Dataset):
-     """
-     1) loads audio, text pairs
-     2) normalizes text and converts them to sequences of integers
-     3) computes spectrograms from audio files.
-     """
-     def __init__(self, audiopaths_and_text, hparams):
-         self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
-         self.text_cleaners = hparams.text_cleaners
-         self.max_wav_value = hparams.max_wav_value
-         self.sampling_rate = hparams.sampling_rate
-         self.filter_length = hparams.filter_length
-         self.hop_length = hparams.hop_length
-         self.win_length = hparams.win_length
-
-         self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
-         self.add_blank = hparams.add_blank
-         self.min_text_len = getattr(hparams, "min_text_len", 1)
-         self.max_text_len = getattr(hparams, "max_text_len", 190)
-
-         random.seed(1234)
-         random.shuffle(self.audiopaths_and_text)
-         self._filter()
-
-     def _filter(self):
-         """
-         Filter text & store spec lengths
-         """
-         # Store spectrogram lengths for Bucketing
-         # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
-         # spec_length = wav_length // hop_length
-
-         audiopaths_and_text_new = []
-         lengths = []
-         for audiopath, text in self.audiopaths_and_text:
-             if self.min_text_len <= len(text) <= self.max_text_len:
-                 audiopaths_and_text_new.append([audiopath, text])
-                 lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
-         self.audiopaths_and_text = audiopaths_and_text_new
-         self.lengths = lengths
-
-     def get_audio_text_pair(self, audiopath_and_text):
-         # separate filename and text
-         audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
-         text = self.get_text(text)
-         spec, wav = self.get_audio(audiopath)
-         return (text, spec, wav)
-
-     def get_audio(self, filename):
-         audio, sampling_rate = load_wav_to_torch(filename)
-         if sampling_rate != self.sampling_rate:
-             raise ValueError("{} SR doesn't match target {} SR".format(
-                 sampling_rate, self.sampling_rate))
-         audio_norm = audio / self.max_wav_value
-         audio_norm = audio_norm.unsqueeze(0)
-         spec_filename = filename.replace(".wav", ".spec.pt")
-         if os.path.exists(spec_filename):
-             spec = torch.load(spec_filename)
-         else:
-             spec = spectrogram_torch(audio_norm, self.filter_length,
-                                      self.sampling_rate, self.hop_length, self.win_length,
-                                      center=False)
-             spec = torch.squeeze(spec, 0)
-             torch.save(spec, spec_filename)
-         return spec, audio_norm
-
-     def get_text(self, text):
-         if self.cleaned_text:
-             text_norm = cleaned_text_to_sequence(text)
-         else:
-             text_norm = text_to_sequence(text, self.text_cleaners)
-         if self.add_blank:
-             text_norm = commons.intersperse(text_norm, 0)
-         text_norm = torch.LongTensor(text_norm)
-         return text_norm
-
-     def __getitem__(self, index):
-         return self.get_audio_text_pair(self.audiopaths_and_text[index])
-
-     def __len__(self):
-         return len(self.audiopaths_and_text)
-
-
- class TextAudioCollate():
-     """ Zero-pads model inputs and targets
-     """
-     def __init__(self, return_ids=False):
-         self.return_ids = return_ids
-
-     def __call__(self, batch):
-         """Collates training batch from normalized text and audio
-         PARAMS
-         ------
-         batch: [text_normalized, spec_normalized, wav_normalized]
-         """
-         # Right zero-pad all one-hot text sequences to max input length
-         _, ids_sorted_decreasing = torch.sort(
-             torch.LongTensor([x[1].size(1) for x in batch]),
-             dim=0, descending=True)
-
-         max_text_len = max([len(x[0]) for x in batch])
-         max_spec_len = max([x[1].size(1) for x in batch])
-         max_wav_len = max([x[2].size(1) for x in batch])
-
-         text_lengths = torch.LongTensor(len(batch))
-         spec_lengths = torch.LongTensor(len(batch))
-         wav_lengths = torch.LongTensor(len(batch))
-
-         text_padded = torch.LongTensor(len(batch), max_text_len)
-         spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
-         wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
-         text_padded.zero_()
-         spec_padded.zero_()
-         wav_padded.zero_()
-         for i in range(len(ids_sorted_decreasing)):
-             row = batch[ids_sorted_decreasing[i]]
-
-             text = row[0]
-             text_padded[i, :text.size(0)] = text
-             text_lengths[i] = text.size(0)
-
-             spec = row[1]
-             spec_padded[i, :, :spec.size(1)] = spec
-             spec_lengths[i] = spec.size(1)
-
-             wav = row[2]
-             wav_padded[i, :, :wav.size(1)] = wav
-             wav_lengths[i] = wav.size(1)
-
-         if self.return_ids:
-             return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing
-         return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths
-
-
- """Multi speaker version"""
- class TextAudioSpeakerLoader(torch.utils.data.Dataset):
-     """
-     1) loads audio, speaker_id, text pairs
-     2) normalizes text and converts them to sequences of integers
-     3) computes spectrograms from audio files.
-     """
-     def __init__(self, audiopaths_sid_text, hparams):
-         self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)
-         self.text_cleaners = hparams.text_cleaners
-         self.max_wav_value = hparams.max_wav_value
-         self.sampling_rate = hparams.sampling_rate
-         self.filter_length = hparams.filter_length
-         self.hop_length = hparams.hop_length
-         self.win_length = hparams.win_length
-
-         self.cleaned_text = getattr(hparams, "cleaned_text", False)
-
-         self.add_blank = hparams.add_blank
-         self.min_text_len = getattr(hparams, "min_text_len", 1)
-         self.max_text_len = getattr(hparams, "max_text_len", 190)
-
-         random.seed(1234)
-         random.shuffle(self.audiopaths_sid_text)
-         self._filter()
-
-     def _filter(self):
-         """
-         Filter text & store spec lengths
-         """
-         # Store spectrogram lengths for Bucketing
-         # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)
-         # spec_length = wav_length // hop_length
-
-         audiopaths_sid_text_new = []
-         lengths = []
-         for audiopath, sid, text in self.audiopaths_sid_text:
-             if self.min_text_len <= len(text) <= self.max_text_len:
-                 audiopaths_sid_text_new.append([audiopath, sid, text])
-                 lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))
-         self.audiopaths_sid_text = audiopaths_sid_text_new
-         self.lengths = lengths
-
-     def get_audio_text_speaker_pair(self, audiopath_sid_text):
-         # separate filename, speaker_id and text
-         audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2]
-         text = self.get_text(text)
-         spec, wav = self.get_audio(audiopath)
-         sid = self.get_sid(sid)
-         return (text, spec, wav, sid)
-
-     def get_audio(self, filename):
-         audio, sampling_rate = load_wav_to_torch(filename)
-         if sampling_rate != self.sampling_rate:
-             raise ValueError("{} SR doesn't match target {} SR".format(
-                 sampling_rate, self.sampling_rate))
-         audio_norm = audio / self.max_wav_value
-         audio_norm = audio_norm.unsqueeze(0)
-         spec_filename = filename.replace(".wav", ".spec.pt")
-         if os.path.exists(spec_filename):
-             spec = torch.load(spec_filename)
-         else:
-             spec = spectrogram_torch(audio_norm, self.filter_length,
-                                      self.sampling_rate, self.hop_length, self.win_length,
-                                      center=False)
-             spec = torch.squeeze(spec, 0)
-             torch.save(spec, spec_filename)
-         return spec, audio_norm
-
-     def get_text(self, text):
-         if self.cleaned_text:
-             text_norm = cleaned_text_to_sequence(text)
-         else:
-             text_norm = text_to_sequence(text, self.text_cleaners)
-         if self.add_blank:
-             text_norm = commons.intersperse(text_norm, 0)
-         text_norm = torch.LongTensor(text_norm)
-         return text_norm
-
-     def get_sid(self, sid):
-         sid = torch.LongTensor([int(sid)])
-         return sid
-
-     def __getitem__(self, index):
-         return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])
-
-     def __len__(self):
-         return len(self.audiopaths_sid_text)
-
-
- class TextAudioSpeakerCollate():
-     """ Zero-pads model inputs and targets
-     """
-     def __init__(self, return_ids=False):
-         self.return_ids = return_ids
-
-     def __call__(self, batch):
-         """Collates training batch from normalized text, audio and speaker identities
-         PARAMS
-         ------
-         batch: [text_normalized, spec_normalized, wav_normalized, sid]
-         """
-         # Right zero-pad all one-hot text sequences to max input length
-         _, ids_sorted_decreasing = torch.sort(
-             torch.LongTensor([x[1].size(1) for x in batch]),
-             dim=0, descending=True)
-
-         max_text_len = max([len(x[0]) for x in batch])
-         max_spec_len = max([x[1].size(1) for x in batch])
-         max_wav_len = max([x[2].size(1) for x in batch])
-
-         text_lengths = torch.LongTensor(len(batch))
-         spec_lengths = torch.LongTensor(len(batch))
-         wav_lengths = torch.LongTensor(len(batch))
-         sid = torch.LongTensor(len(batch))
-
-         text_padded = torch.LongTensor(len(batch), max_text_len)
-         spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)
-         wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)
-         text_padded.zero_()
-         spec_padded.zero_()
-         wav_padded.zero_()
-         for i in range(len(ids_sorted_decreasing)):
-             row = batch[ids_sorted_decreasing[i]]
-
-             text = row[0]
-             text_padded[i, :text.size(0)] = text
-             text_lengths[i] = text.size(0)
-
-             spec = row[1]
-             spec_padded[i, :, :spec.size(1)] = spec
-             spec_lengths[i] = spec.size(1)
-
-             wav = row[2]
-             wav_padded[i, :, :wav.size(1)] = wav
-             wav_lengths[i] = wav.size(1)
-
-             sid[i] = row[3]
-
-         if self.return_ids:
-             return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing
-         return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid
-
-
- class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
-     """
-     Maintain similar input lengths in a batch.
-     Length groups are specified by boundaries.
-     Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
-
-     It removes samples which are not included in the boundaries.
-     Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
-     """
-     def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True):
-         super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
-         self.lengths = dataset.lengths
-         self.batch_size = batch_size
-         self.boundaries = boundaries
-
-         self.buckets, self.num_samples_per_bucket = self._create_buckets()
-         self.total_size = sum(self.num_samples_per_bucket)
-         self.num_samples = self.total_size // self.num_replicas
-
-     def _create_buckets(self):
-         buckets = [[] for _ in range(len(self.boundaries) - 1)]
-         for i in range(len(self.lengths)):
-             length = self.lengths[i]
-             idx_bucket = self._bisect(length)
-             if idx_bucket != -1:
-                 buckets[idx_bucket].append(i)
-
-         for i in range(len(buckets) - 1, 0, -1):
-             if len(buckets[i]) == 0:
-                 buckets.pop(i)
-                 self.boundaries.pop(i + 1)
-
-         num_samples_per_bucket = []
-         for i in range(len(buckets)):
-             len_bucket = len(buckets[i])
-             total_batch_size = self.num_replicas * self.batch_size
-             rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
-             num_samples_per_bucket.append(len_bucket + rem)
-         return buckets, num_samples_per_bucket
-
-     def __iter__(self):
-         # deterministically shuffle based on epoch
-         g = torch.Generator()
-         g.manual_seed(self.epoch)
-
-         indices = []
-         if self.shuffle:
-             for bucket in self.buckets:
-                 indices.append(torch.randperm(len(bucket), generator=g).tolist())
-         else:
-             for bucket in self.buckets:
-                 indices.append(list(range(len(bucket))))
-
-         batches = []
-         for i in range(len(self.buckets)):
-             bucket = self.buckets[i]
-             len_bucket = len(bucket)
-             ids_bucket = indices[i]
-             num_samples_bucket = self.num_samples_per_bucket[i]
-
-             # add extra samples to make it evenly divisible
-             rem = num_samples_bucket - len_bucket
-             ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
-
-             # subsample
-             ids_bucket = ids_bucket[self.rank::self.num_replicas]
-
-             # batching
-             for j in range(len(ids_bucket) // self.batch_size):
-                 batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size:(j + 1) * self.batch_size]]
-                 batches.append(batch)
-
-         if self.shuffle:
-             batch_ids = torch.randperm(len(batches), generator=g).tolist()
-             batches = [batches[i] for i in batch_ids]
-         self.batches = batches
-
-         assert len(self.batches) * self.batch_size == self.num_samples
-         return iter(self.batches)
-
-     def _bisect(self, x, lo=0, hi=None):
-         if hi is None:
-             hi = len(self.boundaries) - 1
-
-         if hi > lo:
-             mid = (hi + lo) // 2
-             if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
-                 return mid
-             elif x <= self.boundaries[mid]:
-                 return self._bisect(x, lo, mid)
-             else:
-                 return self._bisect(x, mid + 1, hi)
-         else:
-             return -1
-
-     def __len__(self):
-         return self.num_samples // self.batch_size
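
A hedged sketch of wiring the loader, collate function, and bucket sampler above into a training DataLoader; the hparams object, filelist path, batch size, and boundaries are illustrative assumptions:

    import torch

    train_dataset = TextAudioLoader('filelists/train.txt', hparams)   # hparams assumed to carry the fields read above
    sampler = DistributedBucketSampler(train_dataset, batch_size=16,
                                       boundaries=[32, 300, 400, 500, 600, 700, 800, 900, 1000],
                                       num_replicas=1, rank=0, shuffle=True)
    loader = torch.utils.data.DataLoader(train_dataset, num_workers=4,
                                         collate_fn=TextAudioCollate(),
                                         batch_sampler=sampler)
    for text, text_lens, spec, spec_lens, wav, wav_lens in loader:
        break   # one zero-padded, length-sorted batch
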
spaces/Benson/text-generation/Examples/Descargar Apk Para El IPhone.md DELETED
@@ -1,92 +0,0 @@
- <br />
- <h1>How to Download Madden NFL 12 APK for Android</h1>
- <p>If you are a fan of NFL football, you may want to play Madden NFL 12 on your Android device. Madden NFL 12 is a realistic sports simulator that lets you choose from 32 NFL teams and battle in their real-life stadiums. You can also make trades, track stats, and launch winning strategies from in-depth playbooks unique to each team. However, Madden NFL 12 is not available on the Google Play Store, so you will have to download and install the APK file manually. In this article, we will show you how to download the Madden NFL 12 APK for Android, as well as the benefits and risks of doing so.</p>
- <h2>What is Madden NFL 12?</h2>
- <p>Madden NFL 12 is a video game developed by EA Sports and released in 2011 for various platforms, including Android. It is the 23rd installment in the Madden NFL series, which is based on the National Football League (NFL). Madden NFL 12 features improved gameplay and graphics, as well as new modes and features, such as Total Defensive Control, Action Control Time, Hot Routes, and more.</p>
- <h2>download apk for the iPhone</h2><br /><p><b><b>DOWNLOAD</b> &#187;&#187;&#187; <a href="https://bltlly.com/2v6Lxi">https://bltlly.com/2v6Lxi</a></b></p><br /><br />
- <h3>Features of Madden NFL 12</h3>
- <p>Some of the features of Madden NFL 12 are:</p>
- <ul>
- <li><b>Authentic NFL action:</b> Choose from all 32 NFL teams and battle in their real-life stadiums. You can also customize your rosters, uniforms, and settings.</li>
- <li><b>Control every move:</b> Slow the clock down and run the play on both sides of the ball with Total Defensive Control (TDC) and Action Control Time (ACT). Pause the action and put your players in position to deliver game-changing hits with TDC, or use Action Control Time to dominate defenses with dives, spins, jukes, and sprints.</li>
- <li><b>Draw hot routes everywhere:</b> Draw hot routes for passing, running, and defending. Even save your best (or craziest) routes as audibles.</li>
-
- </ul>
- <h3>Requirements for Madden NFL 12</h3>
- <p>To play Madden NFL 12 on your Android device, you will need:</p>
- <ul>
- <li>An Android device running Android 2.1 or higher.</li>
- <li>At least 5 MB of free storage space on your device or SD card.</li>
- <li>A stable Internet connection to download the APK file.</li>
- </ul>
- <h2>How to Download Madden NFL 12 APK for Android</h2>
- <p>To download the Madden NFL 12 APK for Android, you will need to follow these steps:</p>
- <h3>Step 1: Enable Unknown Sources on your device</h3>
- <p>Since you are downloading an APK file from a source other than the Google Play Store, you will need to enable Unknown Sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this:</p>
- <ol>
- <li>Go to Settings > Security > Unknown Sources.</li>
- <li>Toggle the option to allow the installation of apps from unknown sources.</li>
- <li>You may see a warning message asking you to confirm your action. Tap OK to continue.</li>
- </ol>
- <h3>Step 2: Find a reliable source for the APK file</h3>
- <p>Next, you will need to find a reliable source for the Madden NFL 12 APK file. There are many websites that offer APK files for various apps and games, but not all of them are trustworthy. Some of them may contain malware or viruses that can harm your device or steal your data. Therefore, you should be careful when choosing a source for the APK file. To find a reliable source, you can:</p>
- <ul>
- <li>Do some research online and read reviews from other users who have downloaded the APK file from the same source.</li>
- <li>Check the reputation and ratings of the website offering the APK file.</li>
- <li>Scan the APK file with an antivirus or a malware scanner before downloading it.</li>
- </ul>
-
- <h3>Step 3: Download and install the APK file</h3>
- <p>Once you have found a reliable source for the APK file, you can download and install it on your device. To do this:</p>
- <ol>
- <li>Open the link to the APK file in your device's browser.</li>
- <li>Tap the Download button and wait for the download to complete.</li>
- <li>Once the download is finished, locate the APK file in your device's file manager or downloads folder.</li>
- <li>Tap the APK file and follow the on-screen instructions to install it.</li>
- </ol>
- <h3>Step 4: Launch and enjoy the game</h3>
- <p>After installing the APK file, you can launch and enjoy Madden NFL 12 on your Android device. To do this:</p>
- <p></p>
- <ol>
- <li>Go to your device's app drawer and look for the Madden NFL 12 icon.</li>
- <li>Tap the icon to launch the game.</li>
- <li>You may need to grant the game some permissions, such as access to your storage, microphone, or camera. Tap Allow to continue.</li>
- <li>You may also need to download some additional data or files for the game to work properly. Follow the on-screen instructions to complete the process.</li>
- <li>Once everything is ready, you can start playing Madden NFL 12 on your Android device.</li>
- </ol>
- <h2>Benefits of Downloading Madden NFL 12 APK for Android</h2>
- <p>Downloading the Madden NFL 12 APK for Android has some benefits that you may not get from other sources. Some of these benefits are:</p>
- <h3>Play offline anytime, anywhere</h3>
- <p>One of the benefits of downloading the Madden NFL 12 APK for Android is that you can play offline anytime, anywhere. You do not need an Internet connection or a Google Play account to play. You can enjoy Madden NFL 12 on your Android device without interruptions or limitations.</p>
- <h3>Save storage space and data usage</h3>
-
- <h3>Access all features and modes</h3>
- <p>A third benefit of downloading the Madden NFL 12 APK for Android is that you can access all of the game's features and modes. You do not need to pay for or unlock anything to enjoy the full version of Madden NFL 12. You can play with all 32 NFL teams, customize your rosters, use all the playbooks, and more.</p>
- <h2>Risks of Downloading Madden NFL 12 APK for Android</h2>
- <p>However, downloading the Madden NFL 12 APK for Android also has some risks that you should be aware of. Some of these risks are:</p>
- <h3>Potential malware or viruses</h3>
- <p>One of the risks of downloading the Madden NFL 12 APK for Android is that you may get malware or viruses on your device. As mentioned earlier, not all sources of APK files are trustworthy. Some of them may contain malicious code or programs that can harm your device or steal your data. Therefore, you should be careful when choosing a source for the APK file and scan it with an antivirus or a malware scanner before downloading it.</p> <h3>Legal issues and copyright violations</h3>
- <p>Another risk of downloading the Madden NFL 12 APK for Android is that you may face legal issues and copyright violations. Madden NFL 12 is a licensed product of EA Sports and the NFL, and they have the exclusive rights to distribute and sell the game. Downloading and installing the APK file from an unauthorized source may violate their terms of service and intellectual property rights. You may be liable for legal action or penalties if you are caught doing so.</p>
- <h3>Compatibility and performance issues</h3>
-
- <h2>Conclusion</h2>
- <p>Madden NFL 12 is a great game for NFL fans who want to play it on their Android devices. However, since it is not available on the Google Play Store, you will have to download and install the APK file manually. This has some benefits, such as playing offline, saving space and data, and accessing all features and modes. But it also has some risks, such as malware or viruses, legal issues, and compatibility and performance problems. Therefore, you should be careful when choosing a source for the APK file and scan it with an antivirus or a malware scanner before downloading it. You should also be aware of the consequences of downloading the Madden NFL 12 APK for Android and do so at your own risk.</p>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some frequently asked questions about downloading the Madden NFL 12 APK for Android:</p>
- <h3>Q: Is it safe to download the Madden NFL 12 APK for Android?</h3>
- <p>A: It depends on the source of the APK file. Some sources may be safe and reliable, while others may be unsafe and untrustworthy. You should do some research and read reviews from other users who have downloaded the APK file from the same source. You should also scan the APK file with an antivirus or a malware scanner before downloading it.</p>
- <h3>Q: Is it legal to download the Madden NFL 12 APK for Android?</h3>
- <p>A: It may not be legal to download the Madden NFL 12 APK for Android from an unauthorized source. Madden NFL 12 is a licensed product of EA Sports and the NFL, and they have the exclusive rights to distribute and sell the game. Downloading and installing the APK file from an unauthorized source may violate their terms of service and intellectual property rights. You may be liable for legal action or penalties if you are caught doing so.</p>
- <h3>Q: Is it free to download the Madden NFL 12 APK for Android?</h3>
-
- <h3>Q: How can I update the Madden NFL 12 APK for Android?</h3>
- <p>A: You may not be able to update the Madden NFL 12 APK for Android from the Google Play Store, since it is not available there. You will have to find another source that offers the latest version of the APK file and download and install it manually. However, this may not be easy or safe, since some sources may not update their APK files regularly or may contain malware or viruses.</p>
- <h3>Q: What are some alternatives to the Madden NFL 12 APK for Android?</h3>
- <p>A: If you are looking for alternatives to the Madden NFL 12 APK for Android, you can try other football games that are available on the Google Play Store, such as:</p>
- <ul>
- <li><b>Madden NFL Mobile Football:</b> This is the official mobile version of Madden NFL, which lets you build your own team, compete in live events, join leagues, and more.</li>
- <li><b>NFL Football Games:</b> This is a collection of various football games that let you play as your favorite NFL team, score touchdowns, make tackles, and more.</li>
- <li><b>NFL Rush Gameday:</b> This is a fun and casual football game that lets you play as your favorite NFL mascot, collect power-ups, dodge obstacles, and more.</li>
- </ul></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Benson/text-generation/Examples/Descargar Fine Fine Love De J Martins.md DELETED
@@ -1,61 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar Fine Fine Love por J Martins</h1>
3
- <p>Si usted está buscando una canción romántica y pegadiza para darle vida a su estado de ánimo, es posible que desee echa un vistazo a <strong>Fine Fine Fine Love</strong> por <strong>J Martins</strong>. Esta es una canción de amor nigeriana que fue lanzada en 2012 y ha sido un éxito desde entonces. En este artículo, te mostraremos cómo descargar esta canción desde diferentes plataformas, como YouTube, Spotify y Apple Music. También te diremos por qué esta canción es tan popular y cuáles son los beneficios de descargarla. ¡Empecemos! </p>
4
- <h2>Introduction</h2>
5
- <p><strong>Fine Fine Love</strong> is a song by <strong>J Martins</strong>, a Nigerian singer, songwriter, and producer. He is known for his fusion of Afro-pop, highlife, and R&B. He has collaborated with many other African artists, such as Fally Ipupa, DJ Arafat, Timaya, Phyno, and Flavour. He has also won several awards, including the MTV Africa Music Award for Best Collaboration in 2010.</p>
6
- <h2>download fine fine love by j martins</h2><br /><p><b><b>Download</b> &#9989; <a href="https://bltlly.com/2v6Lyl">https://bltlly.com/2v6Lyl</a></b></p><br /><br />
7
- <p><strong>Fine Fine Love</strong> is one of his most popular songs, as it expresses his love and appreciation for his partner. The song has a catchy melody, a smooth beat, and a sweet message. The lyrics are in English and Igbo, a Nigerian language. The song suits any occasion, whether you want to dance with your partner, serenade them, or simply relax and enjoy the music.</p>
8
- <p>One of the benefits of downloading <strong>Fine Fine Love</strong> by <strong>J Martins</strong> is that you can listen to it anytime and anywhere, even without an internet connection. You can also save data and storage space on your device, since you don't have to stream it online every time. In addition, by downloading his music legally and ethically you can support <strong>J Martins</strong> as an artist.</p>
9
- <h2>How to Download Fine Fine Love by J Martins from YouTube</h2>
10
-
11
- <h3>Step 1: Find the official video or audio of Fine Fine Love by J Martins on YouTube</h3>
12
- <p>The first step is to find the official video or audio of <strong>Fine Fine Love</strong> by <strong>J Martins</strong> on YouTube. You can do this by typing the name of the song and the artist into the YouTube search bar. Alternatively, you can use this link to access the official video of <strong>Fine Fine Love</strong> by <strong>J Martins</strong>: . Make sure to choose the official video or audio from <strong>J Martins</strong>' verified channel, as there may be other versions or covers of the song on YouTube.</p>
13
- <h3>Step 2: Copy the URL of the video or audio</h3>
14
- <p>The next step is to copy the URL of the video or audio of <strong>Fine Fine Love</strong> by <strong>J Martins</strong> that you found on YouTube. You can do this by right-clicking on the video or audio and selecting "Copy video URL" or "Copy link address". Alternatively, you can copy the URL from your browser's address bar.</p>
15
- <h3>Step 3: Paste the URL into a YouTube downloader website or app</h3>
16
- <p>The third step is to paste the URL you copied into a YouTube downloader website or app. There are many websites and apps that can help you download videos or audio from YouTube, such as Y2Mate, 4K Video Downloader, SaveFrom.net, VidMate, and more. You can choose whichever website or app you prefer, as long as it is safe and reliable. To paste the URL, you can right-click on the input box of the website or app and select "Paste", or use the keyboard shortcut Ctrl + V (Windows) or Command + V (Mac).</p>
17
- <h3>Step 4: Choose the format and quality of the download</h3>
18
-
19
- <h3>Step 5: Click the download button and save the file to your device</h3>
20
- <p>The final step is to click the download button and save the file to your device. Once you have chosen the format and quality of the download, you can click the download button that appears on the website or app. This starts the download process, which may take a few seconds or minutes depending on your internet speed and the file size. After the download finishes, you can save the file to your device by choosing a location and a name for it. You can then open the file with your default media player or any other media player installed on your device. A scripted alternative is sketched below.</p>
21
- <p></p>
22
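- <p>If you prefer a scripted route over a downloader website, the open-source yt-dlp library can do the same job from Python. The sketch below is illustrative only: the URL is a placeholder for the link you copied in Step 2, and it assumes yt-dlp and FFmpeg are installed.</p>
- <pre><code># pip install yt-dlp   (FFmpeg must also be installed for MP3 conversion)
- from yt_dlp import YoutubeDL
-
- url = "https://www.youtube.com/watch?v=VIDEO_ID"  # placeholder: paste the URL from Step 2
- options = {
-     "format": "bestaudio/best",         # pick the best available audio stream
-     "outtmpl": "%(title)s.%(ext)s",     # name the file after the video title
-     "postprocessors": [{                # convert the result to MP3 via FFmpeg
-         "key": "FFmpegExtractAudio",
-         "preferredcodec": "mp3",
-     }],
- }
- with YoutubeDL(options) as ydl:
-     ydl.download([url])                 # fetch and convert the audio
- </code></pre>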
- <h2>How to Download Fine Fine Love by J Martins on Spotify</h2>
23
- <p>Another way to download <strong>Fine Fine Love</strong> by <strong>J Martins</strong> is to use Spotify, one of the most popular music streaming platforms in the world. Spotify has millions of songs and podcasts you can listen to online or offline, including <strong>Fine Fine Love</strong> by <strong>J Martins</strong>. However, if you want to download the song from Spotify, you will need a Spotify Premium account, which costs $9.99 per month. With Spotify Premium, you can download up to 10,000 songs on five different devices and listen to them ad-free. Here are the steps:</p>
24
- <h3>Step 1: Sign up for a Spotify account or log in to your existing one</h3>
25
- <p>The first step is to sign up for a Spotify account or log in to your existing one. You can do this by visiting the Spotify website or downloading the Spotify app on your device. You can sign up for a Spotify account using your email address, Facebook account, or phone number. You can also start a 30-day free trial of Spotify Premium, which gives you access to all Premium features, including downloading songs for offline listening.</p>
26
- <h3>Step 2: Search for Fine Fine Love by J Martins on Spotify</h3>
27
-
28
- <h3>Step 3: Add the song to your library or a playlist</h3>
29
- <p>The third step is to add the song to your library or a playlist on Spotify. You can do this by clicking the heart icon next to the song title, which adds it to your library. Alternatively, you can click the three-dot icon next to the song title and select "Add to playlist", which lets you pick an existing playlist or create a new one. You can also build a custom playlist with other songs by <strong>J Martins</strong> or other Nigerian love songs.</p>
30
- <h3>Step 4: Enable offline mode on your device</h3>
31
- <p>The fourth step is to enable offline mode on your device. This lets you play the songs you have downloaded from Spotify without using data or Wi-Fi. To enable offline mode, you can go to your device settings and turn on airplane mode, or turn off cellular data and Wi-Fi. Alternatively, you can go to your Spotify app settings and turn on the offline mode option.</p>
32
- <h3>Step 5: Download the song to your device and enjoy it offline</h3>
33
-
34
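- <p>For completeness, Steps 2 and 3 can also be automated with Spotipy, a Python client for Spotify's Web API. This is a hedged sketch, not official Spotify guidance: it assumes you have registered an app to obtain OAuth credentials, and the actual offline download in Steps 4 and 5 still has to happen inside the Spotify app with a Premium account.</p>
- <pre><code># pip install spotipy
- import spotipy
- from spotipy.oauth2 import SpotifyOAuth
-
- # expects SPOTIPY_CLIENT_ID / SPOTIPY_CLIENT_SECRET / SPOTIPY_REDIRECT_URI env vars
- sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope="user-library-modify"))
-
- # Step 2: search for the track
- results = sp.search(q="J Martins Fine Fine Love", type="track", limit=1)
- track = results["tracks"]["items"][0]
-
- # Step 3: save it to Your Library (the heart icon)
- sp.current_user_saved_tracks_add([track["id"]])
- print("Saved:", track["name"], "-", track["artists"][0]["name"])
- </code></pre>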
- <h2>How to Download Fine Fine Love by J Martins on Apple Music</h2>
35
- <p>A third way to download <strong>Fine Fine Love</strong> by <strong>J Martins</strong> is to use Apple Music, another popular music streaming platform. Apple Music has millions of songs and podcasts you can listen to online or offline, including <strong>Fine Fine Love</strong> by <strong>J Martins</strong>. However, if you want to download the song from Apple Music, you will need an Apple Music subscription, which costs $9.99 per month. With an Apple Music subscription, you can download up to 100,000 songs on up to 10 devices and listen to them ad-free. Here are the steps:</p>
36
- <h3>Step 1: Sign up for an Apple Music account or log in to your existing one</h3>
37
- <p>The first step is to sign up for an Apple Music account or log in to your existing one. You can do this by visiting the Apple Music website or downloading the Apple Music app on your device. You can sign up for Apple Music with your Apple ID, which is the same as your iTunes account. You can also start a three-month free trial of Apple Music, which gives you access to all its features, including downloading songs for offline listening.</p>
38
- <h3>Step 2: Search for Fine Fine Love by J Martins on Apple Music</h3>
39
- <p>The next step is to search for <strong>Fine Fine Love</strong> by <strong>J Martins</strong> on Apple Music. You can do this by typing the name of the song and the artist into the Apple Music search bar. Alternatively, you can use this link to access the song on Apple Music: . Make sure to choose the official song from <strong>J Martins</strong>' verified profile, as there may be other versions or covers of the song on Apple Music.</p>
40
- <h3>Step 3: Add the song to your library or a playlist</h3>
41
-
42
- <h3>Step 4: Turn on the Sync Library option on your device</h3>
43
- <p>The fourth step is to turn on the Sync Library option on your device. This lets you sync your songs from Apple Music and download them to your device. To turn on Sync Library, go to your device settings and select "Music", then turn on the "Sync Library" option. This may take a few minutes depending on your internet speed and the size of your library.</p>
44
- <h3>Step 5: Download the song to your device and listen offline</h3>
45
- <p>The final step is to download the song to your device and listen to it offline. To download the song, go to your library or playlist in Apple Music and find <strong>Fine Fine Love</strong> by <strong>J Martins</strong>. You will see a cloud icon with a downward arrow next to the song title, indicating that it is available for offline listening. Tap this icon and wait for the download to complete, which may take a few seconds or minutes depending on your internet speed and the file size. Once the download has finished, you will see a checkmark icon next to the song title, indicating that it is downloaded and ready for offline listening. You can also check the progress of your downloads by going to your Apple Music app settings and selecting "Downloaded Music". You can now listen to <strong>Fine Fine Love</strong> by <strong>J Martins</strong> offline anytime and anywhere.</p>
46
- <h2>Conclusion</h2>
47
- <p>In conclusion, we have shown you how to download <strong>Fine Fine Love</strong> by <strong>J Martins</strong> from different platforms, such as YouTube, Spotify, and Apple Music. We have also told you why this song is so popular and what the benefits of downloading it are. We hope this article has been helpful and informative. Now you can enjoy this romantic, catchy song anytime and anywhere, with your partner or on your own.</p>
48
-
49
- <h2>Frequently Asked Questions</h2>
50
- <h4>Who is J Martins?</h4>
51
- <p>J Martins is a Nigerian singer, songwriter, and producer known for his fusion of Afro-pop, highlife, and R&B. He has collaborated with many other African artists and has won several awards for his music.</p>
52
- <h4>What are some other songs by J Martins?</h4>
53
- <p>Other songs by J Martins include Oyoyo, Good or Bad, Touching Body, Cool Temper, Dance 4 Me, and more. You can find them on YouTube, Spotify, Apple Music, or other music platforms.</p>
54
- <h4>What are some other Nigerian love songs?</h4>
55
- <p>Some other Nigerian love songs are African Queen by 2Face Idibia, Fall in Love by D'banj, Assurance by Davido, Ife Wa Gbona by Tiwa Savage, Ololufe by Flavour, and more. You can also check out this playlist of Nigerian love songs on Spotify: .</p>
56
- <h4>How can I support J Martins as an artist?</h4>
57
- <p>You can support J Martins as an artist by downloading his music legally and ethically from the platforms mentioned in this article. You can also follow him on his social media accounts, such as Facebook, Twitter, Instagram, and YouTube. You can stream his music online, watch his videos, like and share his posts, and leave positive comments and feedback. You can also attend his concerts and events if you get the chance.</p>
58
- <h4>How can I find more music like Fine Fine Love by J Martins?</h4>
59
- <p>You can find more music like Fine Fine Love by J Martins by exploring the Afro-pop, highlife, and R&B genres. You can also search for similar artists or songs on YouTube, Spotify, Apple Music, or other music platforms, or use their radio and discovery features to find new music that matches your tastes and preferences.</p>
60
- <br />
61
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BigSalmon/FormalInformalConciseWordy/README.md DELETED
@@ -1,38 +0,0 @@
1
- ---
2
- title: GPT2TRY
3
- emoji: 💩
4
- colorFrom: green
5
- colorTo: gray
6
- sdk: streamlit
7
- sdk_version: 0.89.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- # Configuration
13
-
14
- `title`: _string_
15
- Display title for the Space
16
-
17
- `emoji`: _string_
18
- Space emoji (emoji-only character allowed)
19
-
20
- `colorFrom`: _string_
21
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
22
-
23
- `colorTo`: _string_
24
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
25
-
26
- `sdk`: _string_
27
- Can be either `gradio` or `streamlit`
28
-
29
- `sdk_version` : _string_
30
- Only applicable for `streamlit` SDK.
31
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
32
-
33
- `app_file`: _string_
34
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
35
- Path is relative to the root of the repository.
36
-
37
- `pinned`: _boolean_
38
- Whether the Space stays on top of your list.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/densepose/modeling/test_time_augmentation.py DELETED
@@ -1,75 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- from detectron2.modeling.test_time_augmentation import GeneralizedRCNNWithTTA
3
-
4
-
5
- class DensePoseGeneralizedRCNNWithTTA(GeneralizedRCNNWithTTA):
6
- def __init__(self, cfg, model, transform_data, tta_mapper=None, batch_size=1):
7
- """
8
- Args:
9
- cfg (CfgNode):
10
- model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
11
- transform_data (DensePoseTransformData): contains symmetry label
12
- transforms used for horizontal flip
13
- tta_mapper (callable): takes a dataset dict and returns a list of
14
- augmented versions of the dataset dict. Defaults to
15
- `DatasetMapperTTA(cfg)`.
16
- batch_size (int): batch the augmented images into this batch size for inference.
17
- """
18
- self._transform_data = transform_data
19
- super().__init__(cfg=cfg, model=model, tta_mapper=tta_mapper, batch_size=batch_size)
20
-
21
- # the implementation follows closely the one from detectron2/modeling
22
- def _inference_one_image(self, input):
23
- """
24
- Args:
25
- input (dict): one dataset dict
26
-
27
- Returns:
28
- dict: one output dict
29
- """
30
-
31
- augmented_inputs, aug_vars = self._get_augmented_inputs(input)
32
- # Detect boxes from all augmented versions
33
- with self._turn_off_roi_heads(["mask_on", "keypoint_on", "densepose_on"]):
34
- # temporarily disable roi heads
35
- all_boxes, all_scores, all_classes = self._get_augmented_boxes(
36
- augmented_inputs, aug_vars
37
- )
38
- merged_instances = self._merge_detections(
39
- all_boxes, all_scores, all_classes, (aug_vars["height"], aug_vars["width"])
40
- )
41
-
42
- if self.cfg.MODEL.MASK_ON or self.cfg.MODEL.DENSEPOSE_ON:
43
- # Use the detected boxes to obtain new fields
44
- augmented_instances = self._rescale_detected_boxes(
45
- augmented_inputs, merged_instances, aug_vars
46
- )
47
- # run forward on the detected boxes
48
- outputs = self._batch_inference(
49
- augmented_inputs, augmented_instances, do_postprocess=False
50
- )
51
- # Delete now useless variables to avoid being out of memory
52
- del augmented_inputs, augmented_instances, merged_instances
53
- # average the predictions
54
- if self.cfg.MODEL.MASK_ON:
55
- outputs[0].pred_masks = self._reduce_pred_masks(outputs, aug_vars)
56
- if self.cfg.MODEL.DENSEPOSE_ON:
57
- outputs[0].pred_densepose = self._reduce_pred_densepose(outputs, aug_vars)
58
- # postprocess
59
- output = self._detector_postprocess(outputs[0], aug_vars)
60
- return {"instances": output}
61
- else:
62
- return {"instances": merged_instances}
63
-
64
- def _reduce_pred_densepose(self, outputs, aug_vars):
65
- for idx, output in enumerate(outputs):
66
- if aug_vars["do_hflip"][idx]:
67
- output.pred_densepose.hflip(self._transform_data)
68
- # Less memory-intensive averaging
69
- for attr in "SIUV":
70
- setattr(
71
- outputs[0].pred_densepose,
72
- attr,
73
- sum(getattr(o.pred_densepose, attr) for o in outputs) / len(outputs),
74
- )
75
- return outputs[0].pred_densepose
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/complex/csqrtf.h DELETED
@@ -1,147 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- * Copyright 2013 Filipe RNC Maia
4
- *
5
- * Licensed under the Apache License, Version 2.0 (the "License");
6
- * you may not use this file except in compliance with the License.
7
- * You may obtain a copy of the License at
8
- *
9
- * http://www.apache.org/licenses/LICENSE-2.0
10
- *
11
- * Unless required by applicable law or agreed to in writing, software
12
- * distributed under the License is distributed on an "AS IS" BASIS,
13
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- * See the License for the specific language governing permissions and
15
- * limitations under the License.
16
- */
17
-
18
- /*-
19
- * Copyright (c) 2007 David Schultz <[email protected]>
20
- * All rights reserved.
21
- *
22
- * Redistribution and use in source and binary forms, with or without
23
- * modification, are permitted provided that the following conditions
24
- * are met:
25
- * 1. Redistributions of source code must retain the above copyright
26
- * notice, this list of conditions and the following disclaimer.
27
- * 2. Redistributions in binary form must reproduce the above copyright
28
- * notice, this list of conditions and the following disclaimer in the
29
- * documentation and/or other materials provided with the distribution.
30
- *
31
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
32
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
35
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41
- * SUCH DAMAGE.
42
- */
43
-
44
- /*
45
- * Adapted from FreeBSD by Filipe Maia <[email protected]>:
46
- * freebsd/lib/msun/src/s_csqrt.c
47
- */
48
-
49
-
50
- #pragma once
51
-
52
- #include <thrust/complex.h>
53
- #include <thrust/detail/complex/math_private.h>
54
- #include <cmath>
55
-
56
- namespace thrust{
57
- namespace detail{
58
- namespace complex{
59
-
60
- using thrust::complex;
61
-
62
- __host__ __device__ inline
63
- complex<float> csqrtf(const complex<float>& z){
64
- float a = z.real(), b = z.imag();
65
- float t;
66
- int scale;
67
- complex<float> result;
68
-
69
- /* We risk spurious overflow for components >= FLT_MAX / (1 + sqrt(2)). */
70
- const float THRESH = 1.40949553037932e+38f;
71
-
72
- /* Handle special cases. */
73
- if (z == 0.0f)
74
- return (complex<float>(0, b));
75
- if (isinf(b))
76
- return (complex<float>(infinity<float>(), b));
77
- if (isnan(a)) {
78
- t = (b - b) / (b - b); /* raise invalid if b is not a NaN */
79
- return (complex<float>(a, t)); /* return NaN + NaN i */
80
- }
81
- if (isinf(a)) {
82
- /*
83
- * csqrtf(inf + NaN i) = inf + NaN i
84
- * csqrtf(inf + y i) = inf + 0 i
85
- * csqrtf(-inf + NaN i) = NaN +- inf i
86
- * csqrtf(-inf + y i) = 0 + inf i
87
- */
88
- if (signbit(a))
89
- return (complex<float>(fabsf(b - b), copysignf(a, b)));
90
- else
91
- return (complex<float>(a, copysignf(b - b, b)));
92
- }
93
- /*
94
- * The remaining special case (b is NaN) is handled just fine by
95
- * the normal code path below.
96
- */
97
-
98
- /*
99
- * Unlike in the FreeBSD code we'll avoid using double precision as
100
- * not all hardware supports it.
101
- */
102
-
103
- // FLT_MIN*2
104
- const float low_thresh = 2.35098870164458e-38f;
105
- scale = 0;
106
-
107
- if (fabsf(a) >= THRESH || fabsf(b) >= THRESH) {
108
- /* Scale to avoid overflow. */
109
- a *= 0.25f;
110
- b *= 0.25f;
111
- scale = 1;
112
- }else if (fabsf(a) <= low_thresh && fabsf(b) <= low_thresh) {
113
- /* Scale to avoid underflow. */
114
- a *= 4.f;
115
- b *= 4.f;
116
- scale = 2;
117
- }
118
-
119
- /* Algorithm 312, CACM vol 10, Oct 1967. */
120
- if (a >= 0.0f) {
121
- t = sqrtf((a + hypotf(a, b)) * 0.5f);
122
- result = complex<float>(t, b / (2.0f * t));
123
- } else {
124
- t = sqrtf((-a + hypotf(a, b)) * 0.5f);
125
- result = complex<float>(fabsf(b) / (2.0f * t), copysignf(t, b));
126
- }
127
-
128
- /* Rescale. */
129
- if (scale == 1)
130
- return (result * 2.0f);
131
- else if (scale == 2)
132
- return (result * 0.5f);
133
- else
134
- return (result);
135
- }
136
-
137
- } // namespace complex
138
-
139
- } // namespace detail
140
-
141
- template <>
142
- __host__ __device__
143
- inline complex<float> sqrt(const complex<float>& z){
144
- return detail::complex::csqrtf(z);
145
- }
146
-
147
- } // namespace thrust
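-
- A quick way to sanity-check the Algorithm 312 branch above (for a >= 0: t = sqrt((a + hypot(a, b)) / 2), result = t + i*b/(2t)) is to mirror the formula in Python and compare it against the reference cmath.sqrt. The snippet below is an illustrative check only; it skips the float32 scaling and the special-case paths:
-
-     import cmath
-     import math
-     import random
-
-     def csqrt_alg312(z: complex) -> complex:
-         # Algorithm 312, CACM vol 10, Oct 1967 (same branch structure as csqrtf)
-         a, b = z.real, z.imag
-         if a >= 0.0:
-             t = math.sqrt((a + math.hypot(a, b)) * 0.5)
-             return complex(t, b / (2.0 * t)) if t else complex(0.0, 0.0)
-         t = math.sqrt((-a + math.hypot(a, b)) * 0.5)
-         return complex(abs(b) / (2.0 * t), math.copysign(t, b))
-
-     for _ in range(1000):
-         z = complex(random.uniform(-1e3, 1e3), random.uniform(-1e3, 1e3))
-         assert cmath.isclose(csqrt_alg312(z), cmath.sqrt(z), rel_tol=1e-12)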
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/inner_product.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits inner_product
22
- #include <thrust/system/cpp/detail/inner_product.h>
23
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/detectron2/modeling/text_encoder/transformer.py DELETED
@@ -1,194 +0,0 @@
1
- from collections import OrderedDict
2
- from typing import Tuple, Union
3
- import logging
4
- import os
5
-
6
- import numpy as np
7
- import torch
8
- import torch.nn.functional as F
9
- from torch import nn
10
-
11
- from timm.models.layers import DropPath, trunc_normal_
12
-
13
- from .registry import register_lang_encoder
14
-
15
- logger = logging.getLogger(__name__)
16
-
17
- class LayerNorm(nn.Module):
18
- def __init__(self, hidden_size, eps=1e-12):
19
- """Construct a layernorm module in the TF style (epsilon inside the square root).
20
- """
21
- super(LayerNorm, self).__init__()
22
- self.weight = nn.Parameter(torch.ones(hidden_size))
23
- self.bias = nn.Parameter(torch.zeros(hidden_size))
24
- self.variance_epsilon = eps
25
-
26
- def forward(self, x):
27
- pdtype = x.dtype
28
- x = x.float()
29
- u = x.mean(-1, keepdim=True)
30
- s = (x - u).pow(2).mean(-1, keepdim=True)
31
- x = (x - u) / torch.sqrt(s + self.variance_epsilon)
32
- return self.weight * x.to(pdtype) + self.bias
33
-
34
-
35
- class QuickGELU(nn.Module):
36
- def forward(self, x: torch.Tensor):
37
- return x * torch.sigmoid(1.702 * x)
38
-
39
-
40
- class ResidualAttentionBlock(nn.Module):
41
- def __init__(self,
42
- d_model: int,
43
- n_head: int,
44
- attn_mask: torch.Tensor = None,
45
- drop_path: float = 0.0):
46
- super().__init__()
47
-
48
- self.attn = nn.MultiheadAttention(d_model, n_head)
49
- self.ln_1 = LayerNorm(d_model)
50
- self.mlp = nn.Sequential(OrderedDict([
51
- ("c_fc", nn.Linear(d_model, d_model * 4)),
52
- ("gelu", QuickGELU()),
53
- ("c_proj", nn.Linear(d_model * 4, d_model))
54
- ]))
55
- self.ln_2 = LayerNorm(d_model)
56
- self.attn_mask = attn_mask
57
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
58
-
59
- def attention(self, x: torch.Tensor, key_padding_mask: torch.Tensor = None):
60
- self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) \
61
- if self.attn_mask is not None else None
62
-
63
-
64
- return self.attn(
65
- x, x, x,
66
- key_padding_mask=key_padding_mask,
67
- need_weights=False,
68
- attn_mask=self.attn_mask
69
- )[0]
70
-
71
- def forward(self, x: torch.Tensor, key_padding_mask: torch.Tensor = None):
72
- x = x + self.drop_path(self.attention(self.ln_1(x), key_padding_mask=key_padding_mask))
73
- x = x + self.drop_path(self.mlp(self.ln_2(x)))
74
- return x
75
-
76
-
77
- class Transformer(nn.Module):
78
- def __init__(self,
79
- context_length: int,
80
- vocab_size: int,
81
- width: int,
82
- layers: int,
83
- heads: int,
84
- drop_path: float = 0.0,
85
- autogressive: bool =True):
86
- super().__init__()
87
-
88
- self.token_embedding = nn.Embedding(vocab_size, width)
89
-
90
- self.context_length = context_length
91
- self.positional_embedding = nn.Parameter(
92
- torch.empty(self.context_length, width)
93
- )
94
-
95
- self.width = width
96
- self.layers = layers
97
- self.autogressive = autogressive
98
- attn_mask = self.build_attention_mask() if autogressive else None
99
- dpr = [x.item() for x in torch.linspace(0, drop_path, layers)] # stochastic depth decay rule
100
- self.resblocks = nn.ModuleList(
101
- [
102
- ResidualAttentionBlock(width, heads, attn_mask, dpr[i])
103
- for i in range(layers)
104
- ]
105
- )
106
-
107
- self.ln_final = LayerNorm(width)
108
-
109
- trunc_normal_(self.positional_embedding, std=.02)
110
- # nn.init.normal_(self.token_embedding, std=.02)
111
- trunc_normal_(self.token_embedding.weight, std=.02)
112
- self.apply(self._init_weights)
113
-
114
- @property
115
- def dim_out(self):
116
- return self.width
117
-
118
- def build_attention_mask(self):
119
- # lazily create causal attention mask, with full attention between the vision tokens
120
- # pytorch uses additive attention mask; fill with -inf
121
- mask = torch.empty(self.context_length, self.context_length)
122
- mask.fill_(float("-inf"))
123
- mask.triu_(1) # zero out the lower diagonal
124
- return mask
125
-
126
- def _init_weights(self, m):
127
- if isinstance(m, (nn.Linear, nn.Conv2d)):
128
- logger.info('=> init weight of Linear/Conv2d from trunc norm')
129
- trunc_normal_(m.weight, std=0.02)
130
- if m.bias is not None:
131
- logger.info('=> init bias of Linear/Conv2d to zeros')
132
- nn.init.constant_(m.bias, 0)
133
- elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
134
- nn.init.constant_(m.bias, 0)
135
-
136
- def load_pretrained(self, pretrained='', pretrained_layers=[], verbose=True):
137
- if os.path.isfile(pretrained):
138
- pretrained_dict = torch.load(pretrained, map_location='cpu')
139
- logging.info(f'=> loading pretrained model {pretrained}')
140
- model_dict = self.state_dict()
141
- pretrained_dict = {
142
- k: v for k, v in pretrained_dict.items()
143
- if k in model_dict.keys()
144
- }
145
- need_init_state_dict = {}
146
- for k, v in pretrained_dict.items():
147
- need_init = (
148
- k.split('.')[0] in pretrained_layers
149
- or pretrained_layers[0] == '*'
150
- )
151
- if need_init:
152
- if verbose:
153
- logging.info(f'=> init {k} from {pretrained}')
154
-
155
- need_init_state_dict[k] = v
156
- self.load_state_dict(need_init_state_dict, strict=False)
157
-
158
-
159
- @torch.jit.ignore
160
- def no_weight_decay(self):
161
- return {
162
- 'positional_embedding',
163
- 'token_embedding',
164
- }
165
-
166
- def forward(self, input_ids, attention_mask=None):
167
- key_padding_mask = (input_ids == 0) if not self.autogressive else None
168
- x = self.token_embedding(input_ids) # [batch_size, n_ctx, d_model]
169
- x = x + self.positional_embedding
170
- x = x.permute(1, 0, 2) # NLD -> LND
171
- for block in self.resblocks:
172
- x = block(x, key_padding_mask)
173
- x = x.permute(1, 0, 2) # LND -> NLD
174
-
175
- x = self.ln_final(x)
176
-
177
- return {'last_hidden_state': x}
178
-
179
-
180
- @register_lang_encoder
181
- def lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
182
- transformer = Transformer(
183
- context_length=config_encoder['CONTEXT_LENGTH'],
184
- vocab_size=tokenizer.vocab_size,
185
- width=config_encoder['WIDTH'],
186
- layers=config_encoder['LAYERS'],
187
- heads=config_encoder['HEADS'],
188
- autogressive=config_encoder.get('AUTOGRESSIVE', True)
189
- )
190
-
191
- if config_encoder['LOAD_PRETRAINED']:
192
- transformer.load_pretrained()
193
-
194
- return transformer
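-
-
- # Illustrative smoke test (the sizes below are placeholders, not values from
- # any released config); run this module directly to exercise the forward pass:
- if __name__ == '__main__':
-     model = Transformer(context_length=77, vocab_size=49408,
-                         width=512, layers=2, heads=8)
-     input_ids = torch.randint(1, 49408, (2, 77))  # batch of 2 token sequences
-     out = model(input_ids)
-     print(out['last_hidden_state'].shape)         # torch.Size([2, 77, 512])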
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ClassCat/Spleen-3D-segmentation-with-MONAI/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Spleen 3D Segmentation With MONAI
3
- emoji: 🔥
4
- colorFrom: pink
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.16.2
8
- app_file: app.py
9
- pinned: True
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cloudyy/bark-voice-cloning/hubert/pre_kmeans_hubert.py DELETED
@@ -1,85 +0,0 @@
1
- from pathlib import Path
2
-
3
- import torch
4
- from torch import nn
5
- from einops import pack, unpack
6
-
7
- import fairseq
8
-
9
- from torchaudio.functional import resample
10
-
11
- import logging
12
- logging.root.setLevel(logging.ERROR)
13
-
14
-
15
- def exists(val):
16
- return val is not None
17
-
18
-
19
- def default(val, d):
20
- return val if exists(val) else d
21
-
22
-
23
- class CustomHubert(nn.Module):
24
- """
25
- checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert
26
- or you can train your own
27
- """
28
-
29
- def __init__(
30
- self,
31
- checkpoint_path,
32
- target_sample_hz=16000,
33
- seq_len_multiple_of=None,
34
- output_layer=9
35
- ):
36
- super().__init__()
37
- self.target_sample_hz = target_sample_hz
38
- self.seq_len_multiple_of = seq_len_multiple_of
39
- self.output_layer = output_layer
40
-
41
- model_path = Path(checkpoint_path)
42
-
43
- assert model_path.exists(), f'path {checkpoint_path} does not exist'
44
-
45
- checkpoint = torch.load(checkpoint_path)
46
- load_model_input = {checkpoint_path: checkpoint}
47
- model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input)
48
-
49
- self.model = model[0]
50
- self.model.eval()
51
-
52
- @property
53
- def groups(self):
54
- return 1
55
-
56
- @torch.no_grad()
57
- def forward(
58
- self,
59
- wav_input,
60
- flatten=True,
61
- input_sample_hz=None
62
- ):
63
- device = wav_input.device
64
-
65
- if exists(input_sample_hz):
66
- wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz)
67
-
68
- embed = self.model(
69
- wav_input,
70
- features_only=True,
71
- mask=False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code
72
- output_layer=self.output_layer
73
- )
74
-
75
- embed, packed_shape = pack([embed['x']], '* d')
76
-
77
- # codebook_indices = self.kmeans.predict(embed.cpu().detach().numpy())
78
-
79
- codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device) # .long()
80
-
81
- if flatten:
82
- return codebook_indices
83
-
84
- codebook_indices, = unpack(codebook_indices, packed_shape, '*')
85
- return codebook_indices
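-
-
- # Minimal usage sketch (the filenames are placeholders; the checkpoint must be
- # a fairseq HuBERT model, as noted in the class docstring):
- if __name__ == '__main__':
-     import torchaudio
-     wav, sr = torchaudio.load('speech.wav')        # (channels, samples)
-     hubert = CustomHubert('hubert_base_ls960.pt')  # e.g. HuBERT Base (LS-960)
-     features = hubert(wav, input_sample_hz=sr)     # flattened per-frame features
-     print(features.shape)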
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CofAI/chat.b4/g4f/models.py DELETED
@@ -1,238 +0,0 @@
1
- from g4f import Provider
2
- import random
3
-
4
-
5
- class Model:
6
- class model:
7
- name: str
8
- base_provider: str
9
- best_provider: str
10
-
11
- class gpt_35_turbo:
12
- name: str = 'gpt-3.5-turbo'
13
- base_provider: str = 'openai'
14
- best_provider: Provider.Provider = Provider.DeepAi
15
-
16
- class gpt_35_turbo_0613:
17
- name: str = 'gpt-3.5-turbo-0613'
18
- base_provider: str = 'openai'
19
- best_provider: Provider.Provider = Provider.Zeabur
20
-
21
- class gpt_35_turbo_0301:
22
- name: str = 'gpt-3.5-turbo-0301'
23
- base_provider: str = 'openai'
24
- best_provider: Provider.Provider = Provider.Zeabur
25
-
26
- class gpt_35_turbo_16k_0613:
27
- name: str = 'gpt-3.5-turbo-16k-0613'
28
- base_provider: str = 'openai'
29
- best_provider: Provider.Provider = Provider.Zeabur
30
-
31
- class gpt_35_turbo_16k:
32
- name: str = 'gpt-3.5-turbo-16k'
33
- base_provider: str = 'openai'
34
- best_provider: Provider.Provider = Provider.Zeabur
35
-
36
- class gpt_4_dev:
37
- name: str = 'gpt-4-for-dev'
38
- base_provider: str = 'openai'
39
- best_provider: Provider.Provider = Provider.Phind
40
-
41
- class gpt_4:
42
- name: str = 'gpt-4'
43
- base_provider: str = 'openai'
44
- best_provider: Provider.Provider = Provider.ChatgptAi
45
-
46
- class gpt_4_0613:
47
- name: str = 'gpt-4-0613'
48
- base_provider: str = 'openai'
49
- best_provider: Provider.Provider = Provider.Lockchat
50
- best_providers: list = [Provider.Bing, Provider.Lockchat]
51
-
52
- class claude_instant_v1_100k:
53
- name: str = 'claude-instant-v1-100k'
54
- base_provider: str = 'anthropic'
55
- best_provider: Provider.Provider = Provider.Vercel
56
-
57
- class claude_instant_v1:
58
- name: str = 'claude-instant-v1'
59
- base_provider: str = 'anthropic'
60
- best_provider: Provider.Provider = Provider.Vercel
61
-
62
- class claude_v1_100k:
63
- name: str = 'claude-v1-100k'
64
- base_provider: str = 'anthropic'
65
- best_provider: Provider.Provider = Provider.Vercel
66
-
67
- class claude_v1:
68
- name: str = 'claude-v1'
69
- base_provider: str = 'anthropic'
70
- best_provider: Provider.Provider = Provider.Vercel
71
-
72
- class alpaca_7b:
73
- name: str = 'alpaca-7b'
74
- base_provider: str = 'replicate'
75
- best_provider: Provider.Provider = Provider.Vercel
76
-
77
- class stablelm_tuned_alpha_7b:
78
- name: str = 'stablelm-tuned-alpha-7b'
79
- base_provider: str = 'replicate'
80
- best_provider: Provider.Provider = Provider.Vercel
81
-
82
- class bloom:
83
- name: str = 'bloom'
84
- base_provider: str = 'huggingface'
85
- best_provider: Provider.Provider = Provider.Vercel
86
-
87
- class bloomz:
88
- name: str = 'bloomz'
89
- base_provider: str = 'huggingface'
90
- best_provider: Provider.Provider = Provider.Vercel
91
-
92
- class flan_t5_xxl:
93
- name: str = 'flan-t5-xxl'
94
- base_provider: str = 'huggingface'
95
- best_provider: Provider.Provider = Provider.Vercel
96
-
97
- class flan_ul2:
98
- name: str = 'flan-ul2'
99
- base_provider: str = 'huggingface'
100
- best_provider: Provider.Provider = Provider.Vercel
101
-
102
- class gpt_neox_20b:
103
- name: str = 'gpt-neox-20b'
104
- base_provider: str = 'huggingface'
105
- best_provider: Provider.Provider = Provider.Vercel
106
-
107
- class oasst_sft_4_pythia_12b_epoch_35:
108
- name: str = 'oasst-sft-4-pythia-12b-epoch-3.5'
109
- base_provider: str = 'huggingface'
110
- best_provider: Provider.Provider = Provider.Vercel
111
-
112
- class santacoder:
113
- name: str = 'santacoder'
114
- base_provider: str = 'huggingface'
115
- best_provider: Provider.Provider = Provider.Vercel
116
-
117
- class command_medium_nightly:
118
- name: str = 'command-medium-nightly'
119
- base_provider: str = 'cohere'
120
- best_provider: Provider.Provider = Provider.Vercel
121
-
122
- class command_xlarge_nightly:
123
- name: str = 'command-xlarge-nightly'
124
- base_provider: str = 'cohere'
125
- best_provider: Provider.Provider = Provider.Vercel
126
-
127
- class code_cushman_001:
128
- name: str = 'code-cushman-001'
129
- base_provider: str = 'openai'
130
- best_provider: Provider.Provider = Provider.Vercel
131
-
132
- class code_davinci_002:
133
- name: str = 'code-davinci-002'
134
- base_provider: str = 'openai'
135
- best_provider: Provider.Provider = Provider.Vercel
136
-
137
- class text_ada_001:
138
- name: str = 'text-ada-001'
139
- base_provider: str = 'openai'
140
- best_provider: Provider.Provider = Provider.Vercel
141
-
142
- class text_babbage_001:
143
- name: str = 'text-babbage-001'
144
- base_provider: str = 'openai'
145
- best_provider: Provider.Provider = Provider.Vercel
146
-
147
- class text_curie_001:
148
- name: str = 'text-curie-001'
149
- base_provider: str = 'openai'
150
- best_provider: Provider.Provider = Provider.Vercel
151
-
152
- class text_davinci_002:
153
- name: str = 'text-davinci-002'
154
- base_provider: str = 'openai'
155
- best_provider: Provider.Provider = Provider.Vercel
156
-
157
- class text_davinci_003:
158
- name: str = 'text-davinci-003'
159
- base_provider: str = 'openai'
160
- best_provider: Provider.Provider = Provider.Vercel
161
-
162
- class palm:
163
- name: str = 'palm2'
164
- base_provider: str = 'google'
165
- best_provider: Provider.Provider = Provider.Bard
166
-
167
- """ 'falcon-40b': Model.falcon_40b,
168
- 'falcon-7b': Model.falcon_7b,
169
- 'llama-13b': Model.llama_13b,"""
170
-
171
- class falcon_40b:
172
- name: str = 'falcon-40b'
173
- base_provider: str = 'huggingface'
174
- best_provider: Provider.Provider = Provider.H2o
175
-
176
- class falcon_7b:
177
- name: str = 'falcon-7b'
178
- base_provider: str = 'huggingface'
179
- best_provider: Provider.Provider = Provider.H2o
180
-
181
- class llama_13b:
182
- name: str = 'llama-13b'
183
- base_provider: str = 'huggingface'
184
- best_provider: Provider.Provider = Provider.H2o
185
-
186
-
187
- class ModelUtils:
188
- convert: dict = {
189
- 'gpt-3.5-turbo': Model.gpt_35_turbo,
190
- 'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
191
- 'gpt-3.5-turbo-0301': Model.gpt_35_turbo_0301,
192
- 'gpt-4': Model.gpt_4,
193
- 'gpt-4-0613': Model.gpt_4_0613,
194
- 'gpt-4-for-dev': Model.gpt_4_dev,
195
- 'gpt-3.5-turbo-16k': Model.gpt_35_turbo_16k,
196
- 'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,
197
-
198
- 'claude-instant-v1-100k': Model.claude_instant_v1_100k,
199
- 'claude-v1-100k': Model.claude_v1_100k,
200
- 'claude-instant-v1': Model.claude_instant_v1,
201
- 'claude-v1': Model.claude_v1,
202
-
203
- 'alpaca-7b': Model.alpaca_7b,
204
- 'stablelm-tuned-alpha-7b': Model.stablelm_tuned_alpha_7b,
205
-
206
- 'bloom': Model.bloom,
207
- 'bloomz': Model.bloomz,
208
-
209
- 'flan-t5-xxl': Model.flan_t5_xxl,
210
- 'flan-ul2': Model.flan_ul2,
211
-
212
- 'gpt-neox-20b': Model.gpt_neox_20b,
213
- 'oasst-sft-4-pythia-12b-epoch-3.5': Model.oasst_sft_4_pythia_12b_epoch_35,
214
- 'santacoder': Model.santacoder,
215
-
216
- 'command-medium-nightly': Model.command_medium_nightly,
217
- 'command-xlarge-nightly': Model.command_xlarge_nightly,
218
-
219
- 'code-cushman-001': Model.code_cushman_001,
220
- 'code-davinci-002': Model.code_davinci_002,
221
-
222
- 'text-ada-001': Model.text_ada_001,
223
- 'text-babbage-001': Model.text_babbage_001,
224
- 'text-curie-001': Model.text_curie_001,
225
- 'text-davinci-002': Model.text_davinci_002,
226
- 'text-davinci-003': Model.text_davinci_003,
227
-
228
- 'palm2': Model.palm,
229
- 'palm': Model.palm,
230
- 'google': Model.palm,
231
- 'google-bard': Model.palm,
232
- 'google-palm': Model.palm,
233
- 'bard': Model.palm,
234
-
235
- 'falcon-40b': Model.falcon_40b,
236
- 'falcon-7b': Model.falcon_7b,
237
- 'llama-13b': Model.llama_13b,
238
- }
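-
-
- # Illustrative lookup against the registry above; run this module directly.
- # The printed provider is whichever class the table marks as best:
- if __name__ == '__main__':
-     m = ModelUtils.convert['gpt-3.5-turbo']
-     print(m.name, m.base_provider, m.best_provider)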
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cropinky/hana_hanak_houses/image_generator.py DELETED
@@ -1,156 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- """Generate images using pretrained network pickle."""
10
-
11
- from ast import parse
12
- import os
13
- from pyexpat import model
14
- import re
15
- from typing import List, Optional, Tuple, Union
16
- import numpy as np
17
- import PIL.Image
18
- import torch
19
- from networks_fastgan import MyGenerator
20
- import random
21
- #----------------------------------------------------------------------------
22
-
23
- def parse_range(s: Union[str, List]) -> List[int]:
24
- '''Parse a comma separated list of numbers or ranges and return a list of ints.
25
-
26
- Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
27
- '''
28
- if isinstance(s, list): return s
29
- ranges = []
30
- range_re = re.compile(r'^(\d+)-(\d+)$')
31
- for p in s.split(','):
32
- m = range_re.match(p)
33
- if m:
34
- ranges.extend(range(int(m.group(1)), int(m.group(2))+1))
35
- else:
36
- ranges.append(int(p))
37
- return ranges
38
-
39
- #----------------------------------------------------------------------------
40
-
41
- def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
42
- '''Parse a floating point 2-vector of syntax 'a,b'.
43
-
44
- Example:
45
- '0,1' returns (0,1)
46
- '''
47
- if isinstance(s, tuple): return s
48
- parts = s.split(',')
49
- if len(parts) == 2:
50
- return (float(parts[0]), float(parts[1]))
51
- raise ValueError(f'cannot parse 2-vector {s}')
52
-
53
- #----------------------------------------------------------------------------
54
-
55
- def make_transform(translate: Tuple[float,float], angle: float):
56
- m = np.eye(3)
57
- s = np.sin(angle/360.0*np.pi*2)
58
- c = np.cos(angle/360.0*np.pi*2)
59
- m[0][0] = c
60
- m[0][1] = s
61
- m[0][2] = translate[0]
62
- m[1][0] = -s
63
- m[1][1] = c
64
- m[1][2] = translate[1]
65
- return m
66
-
67
- #----------------------------------------------------------------------------
68
-
69
- def generate_images(
70
- model_path,
71
- seeds = "10-12",
72
- truncation_psi = 1.0,
73
- noise_mode = "const",
74
- outdir = "out",
75
- translate = "0,0",
76
- rotate = 0,
77
- number_of_images = 16
78
- ):
79
- model_owner = "huggan"
80
- #inputs = gr.inputs.Radio(["Abstract Expressionism", "Impressionism", "Cubism", "Minimalism", "Pop Art", "Color Field", "Hana Hanak houses"])
81
- model_path_dict = {
82
- 'Impressionism' : 'projected_gan_impressionism',
83
- 'Cubism' : 'projected_gan_cubism',
84
- 'Abstract Expressionism' : 'projected_gan_abstract_expressionism',
85
- 'Pop Art' : 'projected_gan_popart',
86
- 'Minimalism' : 'projected_gan_minimalism',
87
- 'Color Field' : 'projected_gan_color_field',
88
- 'Hana Hanak houses' : 'projected_gan_Hana_Hanak',
89
- 'Hana Hanak houses - abstract expressionism' : 'projected_gan_abstract_expressionism_hana',
90
- 'Hana Hanak houses - color field' : 'projected_gan_color_field_hana',
91
-
92
- }
93
-
94
- model_path = model_owner + "/" + model_path_dict[model_path]
95
- print(model_path)
96
- print(seeds)
97
- seeds=[random.randint(1,230)]
98
- #seeds =f"{seeds}-{seeds+number_of_images-1}"
99
- #seeds = parse_range(seeds)
100
- #device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
101
- device = torch.device('cpu')
102
- G = MyGenerator.from_pretrained(model_path)
103
- os.makedirs(outdir, exist_ok=True)
104
- # Labels.
105
- label = torch.zeros([1, G.c_dim], device=device)
106
- """
107
- if G.c_dim != 0:
108
- if class_idx is None:
109
- raise click.ClickException('Must specify class label with --class when using a conditional network')
110
- label[:, class_idx] = 1
111
- else:
112
- if class_idx is not None:
113
- print ('warn: --class=lbl ignored when running on an unconditional network')
114
- """
115
-
116
- print(f"z dimenzija mi je: {G.z_dim}")
117
- # Generate images.
118
-
119
- #imgs_row = np.array()
120
- #imgs_complete = np.array()
121
- print(seeds)
122
- for seed_idx, seed in enumerate(seeds):
123
- print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
124
- z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device).float()
125
- # Construct an inverse rotation/translation matrix and pass to the generator. The
126
- # generator expects this matrix as an inverse to avoid potentially failing numerical
127
- # operations in the network.
128
- if hasattr(G.synthesis, 'input'):
129
- m = make_transform(translate, rotate)
130
- m = np.linalg.inv(m)
131
- G.synthesis.input.transform.copy_(torch.from_numpy(m))
132
-
133
- img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
134
- img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
135
- print(seed_idx)
136
- """
137
- #first image
138
- if seed_idx == 0:
139
- imgs_row = img[0].cpu().numpy()
140
- else:
141
- imgs_row = np.hstack((imgs_row, img[0].cpu().numpy()))"""
142
- #img = PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB')
143
- #PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')
144
- # build the grid with vsplit and that's it
145
- #imgs_complete = np.vstack(np.hsplit(imgs_row, 4))
146
- #cv2.imshow("lalaxd", imgs_complete)
147
- #cv2.waitKey()
148
- return PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB')
149
-
150
-
151
- #----------------------------------------------------------------------------
152
-
153
- if __name__ == "__main__":
154
- generate_images("Impressionism") # model_path has no default; pass a style key from model_path_dict
155
-
156
- #----------------------------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-b7998330.js DELETED
@@ -1,2 +0,0 @@
1
- import{S as M,e as N,s as O,N as z,k,O as Q,K as S,p as j,o as w,M as E,ap as T,Q as A,z as C,v as B,A as q,x as P,a1 as L,B as V,am as W,P as X,R as Y,F as g,ak as r,E as Z,ae as y,h as D,j as F,q as p,r as x,t as K}from"./index-1d65707a.js";/* empty css */import{B as $}from"./Button-f155035a.js";import{B as ee}from"./BlockTitle-dee077e8.js";import"./Info-7c6961ef.js";function te(t){let e;return{c(){e=X(t[1])},m(l,s){j(l,e,s)},p(l,s){s&2&&Y(e,l[1])},d(l){l&&q(e)}}}function le(t){let e,l,s,a,o,c,h;return l=new ee({props:{show_label:t[4],info:t[2],$$slots:{default:[te]},$$scope:{ctx:t}}}),{c(){e=z("label"),k(l.$$.fragment),s=Q(),a=z("input"),S(a,"type","color"),a.disabled=t[3],S(a,"class","svelte-56zyyb"),S(e,"class","block")},m(_,f){j(_,e,f),w(l,e,null),E(e,s),E(e,a),T(a,t[0]),o=!0,c||(h=[A(a,"blur",t[6]),A(a,"input",t[7])],c=!0)},p(_,[f]){const m={};f&16&&(m.show_label=_[4]),f&4&&(m.info=_[2]),f&1026&&(m.$$scope={dirty:f,ctx:_}),l.$set(m),(!o||f&8)&&(a.disabled=_[3]),f&1&&T(a,_[0])},i(_){o||(C(l.$$.fragment,_),o=!0)},o(_){B(l.$$.fragment,_),o=!1},d(_){_&&q(e),P(l),c=!1,L(h)}}}function se(t,e,l){let{value:s="#000000"}=e,{value_is_output:a=!1}=e,{label:o}=e,{info:c=void 0}=e,{disabled:h=!1}=e,{show_label:_=!0}=e;const f=V();function m(){f("change",s),a||f("input")}W(()=>{l(5,a=!1)});function d(u){g.call(this,t,u)}function n(){s=this.value,l(0,s)}return t.$$set=u=>{"value"in u&&l(0,s=u.value),"value_is_output"in u&&l(5,a=u.value_is_output),"label"in u&&l(1,o=u.label),"info"in u&&l(2,c=u.info),"disabled"in u&&l(3,h=u.disabled),"show_label"in u&&l(4,_=u.show_label)},t.$$.update=()=>{t.$$.dirty&1&&m()},[s,o,c,h,_,a,d,n]}class ie extends M{constructor(e){super(),N(this,e,se,le,O,{value:0,value_is_output:5,label:1,info:2,disabled:3,show_label:4})}}function ne(t){let e,l,s,a,o,c;const h=[t[11]];let _={};for(let n=0;n<h.length;n+=1)_=Z(_,h[n]);e=new y({props:_});function f(n){t[13](n)}function m(n){t[14](n)}let d={label:t[2],info:t[3],show_label:t[7],disabled:t[12]==="static"};return t[0]!==void 0&&(d.value=t[0]),t[1]!==void 0&&(d.value_is_output=t[1]),s=new ie({props:d}),D.push(()=>F(s,"value",f)),D.push(()=>F(s,"value_is_output",m)),s.$on("change",t[15]),s.$on("input",t[16]),s.$on("submit",t[17]),s.$on("blur",t[18]),{c(){k(e.$$.fragment),l=Q(),k(s.$$.fragment)},m(n,u){w(e,n,u),j(n,l,u),w(s,n,u),c=!0},p(n,u){const v=u&2048?p(h,[x(n[11])]):{};e.$set(v);const b={};u&4&&(b.label=n[2]),u&8&&(b.info=n[3]),u&128&&(b.show_label=n[7]),u&4096&&(b.disabled=n[12]==="static"),!a&&u&1&&(a=!0,b.value=n[0],K(()=>a=!1)),!o&&u&2&&(o=!0,b.value_is_output=n[1],K(()=>o=!1)),s.$set(b)},i(n){c||(C(e.$$.fragment,n),C(s.$$.fragment,n),c=!0)},o(n){B(e.$$.fragment,n),B(s.$$.fragment,n),c=!1},d(n){n&&q(l),P(e,n),P(s,n)}}}function ae(t){let e,l;return e=new $({props:{visible:t[6],elem_id:t[4],elem_classes:t[5],container:t[8],scale:t[9],min_width:t[10],$$slots:{default:[ne]},$$scope:{ctx:t}}}),{c(){k(e.$$.fragment)},m(s,a){w(e,s,a),l=!0},p(s,[a]){const o={};a&64&&(o.visible=s[6]),a&16&&(o.elem_id=s[4]),a&32&&(o.elem_classes=s[5]),a&256&&(o.container=s[8]),a&512&&(o.scale=s[9]),a&1024&&(o.min_width=s[10]),a&530575&&(o.$$scope={dirty:a,ctx:s}),e.$set(o)},i(s){l||(C(e.$$.fragment,s),l=!0)},o(s){B(e.$$.fragment,s),l=!1},d(s){P(e,s)}}}function ue(t,e,l){let{label:s="ColorPicker"}=e,{info:a=void 0}=e,{elem_id:o=""}=e,{elem_classes:c=[]}=e,{visible:h=!0}=e,{value:_}=e,{value_is_output:f=!1}=e,{show_label:m}=e,{container:d=!0}=e,{scale:n=null}=e,{min_width:u=void 0}=e,{loading_status:v}=e,{mode:b}=e;function 
R(i){_=i,l(0,_)}function U(i){f=i,l(1,f)}function G(i){g.call(this,t,i)}function H(i){g.call(this,t,i)}function I(i){g.call(this,t,i)}function J(i){g.call(this,t,i)}return t.$$set=i=>{"label"in i&&l(2,s=i.label),"info"in i&&l(3,a=i.info),"elem_id"in i&&l(4,o=i.elem_id),"elem_classes"in i&&l(5,c=i.elem_classes),"visible"in i&&l(6,h=i.visible),"value"in i&&l(0,_=i.value),"value_is_output"in i&&l(1,f=i.value_is_output),"show_label"in i&&l(7,m=i.show_label),"container"in i&&l(8,d=i.container),"scale"in i&&l(9,n=i.scale),"min_width"in i&&l(10,u=i.min_width),"loading_status"in i&&l(11,v=i.loading_status),"mode"in i&&l(12,b=i.mode)},[_,f,s,a,o,c,h,m,d,n,u,v,b,R,U,G,H,I,J]}class _e extends M{constructor(e){super(),N(this,e,ue,ae,O,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,value_is_output:1,show_label:7,container:8,scale:9,min_width:10,loading_status:11,mode:12})}get label(){return this.$$.ctx[2]}set label(e){this.$$set({label:e}),r()}get info(){return this.$$.ctx[3]}set info(e){this.$$set({info:e}),r()}get elem_id(){return this.$$.ctx[4]}set elem_id(e){this.$$set({elem_id:e}),r()}get elem_classes(){return this.$$.ctx[5]}set elem_classes(e){this.$$set({elem_classes:e}),r()}get visible(){return this.$$.ctx[6]}set visible(e){this.$$set({visible:e}),r()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),r()}get value_is_output(){return this.$$.ctx[1]}set value_is_output(e){this.$$set({value_is_output:e}),r()}get show_label(){return this.$$.ctx[7]}set show_label(e){this.$$set({show_label:e}),r()}get container(){return this.$$.ctx[8]}set container(e){this.$$set({container:e}),r()}get scale(){return this.$$.ctx[9]}set scale(e){this.$$set({scale:e}),r()}get min_width(){return this.$$.ctx[10]}set min_width(e){this.$$set({min_width:e}),r()}get loading_status(){return this.$$.ctx[11]}set loading_status(e){this.$$set({loading_status:e}),r()}get mode(){return this.$$.ctx[12]}set mode(e){this.$$set({mode:e}),r()}}const me=_e,be=["static","dynamic"],de=t=>({type:{payload:"string"},description:{payload:"hex color code"},example_data:t.value??"#000000"});export{me as Component,de as document,be as modes};
2
- //# sourceMappingURL=index-b7998330.js.map
 
 
 
spaces/DeepFloyd/IF/app.py DELETED
@@ -1,701 +0,0 @@
1
- #!/usr/bin/env python
2
-
3
- import datetime
4
- import hashlib
5
- import json
6
- import os
7
- import random
8
- import tempfile
9
- import shortuuid
10
- from apscheduler.schedulers.background import BackgroundScheduler
11
- import shutil
12
-
13
- import gradio as gr
14
- import torch
15
- from huggingface_hub import HfApi
16
- from share_btn import community_icon_html, loading_icon_html, share_js
17
-
18
- # isort: off
19
- from model import Model
20
- from settings import (
21
- DEBUG,
22
- DEFAULT_CUSTOM_TIMESTEPS_1,
23
- DEFAULT_CUSTOM_TIMESTEPS_2,
24
- DEFAULT_NUM_IMAGES,
25
- DEFAULT_NUM_STEPS_3,
26
- DISABLE_SD_X4_UPSCALER,
27
- GALLERY_COLUMN_NUM,
28
- HF_TOKEN,
29
- MAX_NUM_IMAGES,
30
- MAX_NUM_STEPS,
31
- MAX_QUEUE_SIZE,
32
- MAX_SEED,
33
- SHOW_ADVANCED_OPTIONS,
34
- SHOW_CUSTOM_TIMESTEPS_1,
35
- SHOW_CUSTOM_TIMESTEPS_2,
36
- SHOW_DEVICE_WARNING,
37
- SHOW_DUPLICATE_BUTTON,
38
- SHOW_NUM_IMAGES,
39
- SHOW_NUM_STEPS_1,
40
- SHOW_NUM_STEPS_2,
41
- SHOW_NUM_STEPS_3,
42
- SHOW_UPSCALE_TO_256_BUTTON,
43
- UPLOAD_REPO_ID,
44
- UPLOAD_RESULT_IMAGE,
45
- )
46
- # isort: on
47
-
48
- TITLE = '# [DeepFloyd IF](https://github.com/deep-floyd/IF)'
49
- DESCRIPTION = 'The DeepFloyd IF model was initially released as a non-commercial research-only model. Please make sure you read and abide by the [LICENSE](https://huggingface.co/spaces/DeepFloyd/deepfloyd-if-license) before using it.'
50
- DISCLAIMER = 'In this demo, the DeepFloyd team may collect prompts, and user preferences (which of the images the user chose to upscale) for improving future models'
51
- FOOTER = """<div class="footer">
52
- <p>Model by <a href="https://huggingface.co/DeepFloyd" style="text-decoration: underline;" target="_blank">DeepFloyd</a> supported by <a href="https://huggingface.co/stabilityai" style="text-decoration: underline;" target="_blank">Stability AI</a>
53
- </p>
54
- </div>
55
- <div class="acknowledgments">
56
- <p><h4>LICENSE</h4>
57
- The model is licensed under the bespoke non-commercial research-only <a href="https://huggingface.co/spaces/DeepFloyd/deepfloyd-if-license" style="text-decoration: underline;" target="_blank">DeepFloyd IF Research License Agreement</a>. The license forbids you from sharing any content for commercial use, or content that violates any laws, produces harm to a person, disseminates personal information meant to cause harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions please <a href="https://huggingface.co/spaces/DeepFloyd/deepfloyd-if-license" style="text-decoration: underline;" target="_blank">read the license</a></p>
58
- <p><h4>Biases and content acknowledgment</h4>
59
- Despite how impressive the ability to turn text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, explicit content, and violence. The model was trained on a subset of the <a href="https://laion.ai/blog/laion-5b/" style="text-decoration: underline;" target="_blank">LAION-5B dataset</a> and is meant for research purposes. You can read more in the <a href="https://huggingface.co/DeepFloyd/IF-I-IF-v1.0" style="text-decoration: underline;" target="_blank">model card</a></p>
60
- </div>
61
- """
62
- if SHOW_DUPLICATE_BUTTON:
63
- SPACE_ID = os.getenv('SPACE_ID')
64
- DESCRIPTION += f'\n<p><a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space%20to%20skip%20the%20queue-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>'
65
-
66
- if SHOW_DEVICE_WARNING and not torch.cuda.is_available():
67
- DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
68
-
69
- model = Model()
70
-
71
-
72
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
73
- if randomize_seed:
74
- seed = random.randint(0, MAX_SEED)
75
- return seed
76
-
77
-
78
- def get_stage2_index(evt: gr.SelectData) -> int:
79
- return evt.index
80
-
81
-
82
- def check_if_stage2_selected(index: int) -> None:
83
- if index == -1:
84
- raise gr.Error(
85
- 'You need to select the image you would like to upscale from the Stage 1 results by clicking.'
86
- )
87
-
88
-
89
- hf_api = HfApi(token=HF_TOKEN)
90
- if UPLOAD_REPO_ID:
91
- hf_api.create_repo(repo_id=UPLOAD_REPO_ID,
92
- private=True,
93
- repo_type='dataset',
94
- exist_ok=True)
95
-
96
-
97
- def get_param_file_hash_name(param_filepath: str) -> str:
98
- if not UPLOAD_REPO_ID:
99
- return ''
100
- with open(param_filepath, 'rb') as f:
101
- md5 = hashlib.md5(f.read()).hexdigest()
102
- utcnow = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S-%f')
103
- return f'{utcnow}-{md5}'
104
-
105
-
106
- def upload_stage1_result(stage1_param_path: str, stage1_result_path: str,
107
- save_name: str) -> None:
108
- if not UPLOAD_REPO_ID:
109
- return
110
- try:
111
- folder_params = "tmp/results/stage1_params"
112
- folder_results = "tmp/results/stage1_results"
113
-
114
- path_params = f"{folder_params}/{save_name}.json"
115
- path_results = f"{folder_results}/{save_name}.pth"
116
-
117
- os.makedirs(folder_params, exist_ok=True)
118
- os.makedirs(folder_results, exist_ok=True)
119
-
120
- shutil.copy(stage1_param_path, path_params)
121
- shutil.copy(stage1_result_path, path_results)
122
-
123
- except Exception as e:
124
- print(e)
125
-
126
-
127
- def upload_stage2_info(stage1_param_file_hash_name: str,
128
- stage2_output_path: str,
129
- selected_index_for_upscale: int, seed_2: int,
130
- guidance_scale_2: float, custom_timesteps_2: str,
131
- num_inference_steps_2: int) -> None:
132
- if not UPLOAD_REPO_ID:
133
- return
134
- if not stage1_param_file_hash_name:
135
- raise ValueError
136
-
137
- stage2_params = {
138
- 'stage1_param_file_hash_name': stage1_param_file_hash_name,
139
- 'selected_index_for_upscale': selected_index_for_upscale,
140
- 'seed_2': seed_2,
141
- 'guidance_scale_2': guidance_scale_2,
142
- 'custom_timesteps_2': custom_timesteps_2,
143
- 'num_inference_steps_2': num_inference_steps_2,
144
- }
145
- with tempfile.NamedTemporaryFile(mode='w', delete=False) as param_file:
146
- param_file.write(json.dumps(stage2_params))
147
- stage2_param_file_hash_name = get_param_file_hash_name(param_file.name)
148
- save_name = f'{stage1_param_file_hash_name}_{stage2_param_file_hash_name}'
149
-
150
- try:
151
- folder_params = "tmp/results/stage2_params"
152
-
153
- os.makedirs(folder_params, exist_ok=True)
154
- path_params = f"{folder_params}/{save_name}.json"
155
- shutil.copy(param_file.name, path_params)
156
-
157
- if UPLOAD_RESULT_IMAGE:
158
- folder_results = "tmp/results/stage2_results"
159
- os.makedirs(folder_results, exist_ok=True)
160
- path_results = f"{folder_results}/{save_name}.png"
161
- shutil.copy(stage2_output_path, path_results)
162
-
163
- except Exception as e:
164
- print(e)
165
-
166
-
167
- def upload_stage2_3_info(stage1_param_file_hash_name: str,
168
- stage2_3_output_path: str,
169
- selected_index_for_upscale: int, seed_2: int,
170
- guidance_scale_2: float, custom_timesteps_2: str,
171
- num_inference_steps_2: int, prompt: str,
172
- negative_prompt: str, seed_3: int,
173
- guidance_scale_3: float,
174
- num_inference_steps_3: int) -> None:
175
- if not UPLOAD_REPO_ID:
176
- return
177
- if not stage1_param_file_hash_name:
178
- raise ValueError
179
-
180
- stage2_3_params = {
181
- 'stage1_param_file_hash_name': stage1_param_file_hash_name,
182
- 'selected_index_for_upscale': selected_index_for_upscale,
183
- 'seed_2': seed_2,
184
- 'guidance_scale_2': guidance_scale_2,
185
- 'custom_timesteps_2': custom_timesteps_2,
186
- 'num_inference_steps_2': num_inference_steps_2,
187
- 'prompt': prompt,
188
- 'negative_prompt': negative_prompt,
189
- 'seed_3': seed_3,
190
- 'guidance_scale_3': guidance_scale_3,
191
- 'num_inference_steps_3': num_inference_steps_3,
192
- }
193
- with tempfile.NamedTemporaryFile(mode='w', delete=False) as param_file:
194
- param_file.write(json.dumps(stage2_3_params))
195
- stage2_3_param_file_hash_name = get_param_file_hash_name(param_file.name)
196
- save_name = f'{stage1_param_file_hash_name}_{stage2_3_param_file_hash_name}'
197
-
198
- try:
199
- folder_params = "tmp/results/stage2_3_params"
200
- os.makedirs(folder_params, exist_ok=True)
201
- path_params = f"{folder_params}/{save_name}.json"
202
- shutil.copy(param_file.name, path_params)
203
-
204
- if UPLOAD_RESULT_IMAGE:
205
- folder_results = "tmp/results/stage2_3_results"
206
- os.makedirs(folder_results, exist_ok=True)
207
- path_results = f"{folder_results}/{save_name}.png"
208
- shutil.copy(stage2_3_output_path, path_results)
209
- except Exception as e:
210
- print(e)
211
-
212
-
213
- def update_upscale_button(selected_index: int) -> tuple[dict, dict]:
214
- if selected_index == -1:
215
- return gr.update(interactive=False), gr.update(interactive=False)
216
- else:
217
- return gr.update(interactive=True), gr.update(interactive=True)
218
-
219
-
220
- def _update_result_view(show_gallery: bool) -> tuple[dict, dict]:
221
- return gr.update(visible=show_gallery), gr.update(visible=not show_gallery)
222
-
223
-
224
- def show_gallery_view() -> tuple[dict, dict]:
225
- return _update_result_view(True)
226
-
227
-
228
- def show_upscaled_view() -> tuple[dict, dict]:
229
- return _update_result_view(False)
230
-
231
- def upload_files():
232
- """Zips files and uploads to dataset. Local data is deleted
233
- """
234
- if os.path.exists("tmp/results") and os.path.isdir("tmp/results"):
235
- try:
236
- random_folder = random.randint(0,1000)
237
- shutil.make_archive("tmp/results", 'zip', "tmp/results")
238
- hf_api.upload_file(
239
- path_or_fileobj="tmp/results.zip",
240
- path_in_repo=f"{random_folder}/results_{shortuuid.uuid()}.zip",
241
- repo_id=UPLOAD_REPO_ID,
242
- repo_type="dataset",
243
- )
244
- shutil.rmtree("tmp/results")
245
- except Exception as e:
246
- print(e)
247
-
248
- examples = [
249
- 'high quality dslr photo, a photo product of a lemon inspired by natural and organic materials, wooden accents, intricately decorated with glowing vines of led lights, inspired by baroque luxury',
250
- 'paper quilling, extremely detailed, paper quilling of a nordic mountain landscape, 8k rendering',
251
- 'letters made of candy on a plate that says "diet"',
252
- 'a photo of a violet baseball cap with yellow text: "deep floyd". 50mm lens, photo realism, cine lens. violet baseball cap says "deep floyd". reflections, render. yellow stitch text "deep floyd"',
253
- 'ultra close-up color photo portrait of rainbow owl with deer horns in the woods',
254
- 'a cloth embroidered with the text "laion" and an embroidered cute baby lion face',
255
- 'product image of a crochet Cthulhu the great old one emerging from a spacetime wormhole made of wool',
256
- 'a little green budgie parrot driving small red toy car in new york street, photo',
257
- 'origami dancer in white paper, 3d render, ultra-detailed, on white background, studio shot.',
258
- 'glowing mushrooms in a natural environment with smoke in the frame',
259
- 'a subway train\'s digital sign saying "open source", vsco preset, 35mm photo, film grain, in a dim subway station',
260
- 'a bowl full of few adorable golden doodle puppies, the doodles dusted in powdered sugar and look delicious, bokeh, cannon. professional macro photo, super detailed. cute sweet golden doodle confectionery, baking puppies in powdered sugar in the bowl',
261
- 'a face of a woman made completely out of foliage, twigs, leaves and flowers, side view'
262
- ]
263
-
264
- with gr.Blocks(css='style.css') as demo:
265
- gr.Markdown(TITLE)
266
- gr.Markdown(DESCRIPTION)
267
- with gr.Box():
268
- with gr.Row(elem_id='prompt-container').style(equal_height=True):
269
- with gr.Column():
270
- prompt = gr.Text(
271
- label='Prompt',
272
- show_label=False,
273
- max_lines=1,
274
- placeholder='Enter your prompt',
275
- elem_id='prompt-text-input',
276
- ).style(container=False)
277
- negative_prompt = gr.Text(
278
- label='Negative prompt',
279
- show_label=False,
280
- max_lines=1,
281
- placeholder='Enter a negative prompt',
282
- elem_id='negative-prompt-text-input',
283
- ).style(container=False)
284
- generate_button = gr.Button('Generate').style(full_width=False)
285
-
286
- with gr.Column() as gallery_view:
287
- gallery = gr.Gallery(label='Stage 1 results',
288
- show_label=False,
289
- elem_id='gallery').style(
290
- columns=GALLERY_COLUMN_NUM,
291
- object_fit='contain')
292
- gr.Markdown('Pick your favorite generation to upscale.')
293
- with gr.Row():
294
- upscale_to_256_button = gr.Button(
295
- 'Upscale to 256px',
296
- visible=SHOW_UPSCALE_TO_256_BUTTON
297
- or DISABLE_SD_X4_UPSCALER,
298
- interactive=False)
299
- upscale_button = gr.Button('Upscale',
300
- interactive=False,
301
- visible=not DISABLE_SD_X4_UPSCALER)
302
- with gr.Column(visible=False) as upscale_view:
303
- result = gr.Image(label='Result',
304
- show_label=False,
305
- type='filepath',
306
- interactive=False,
307
- elem_id='upscaled-image').style(height=640)
308
- back_to_selection_button = gr.Button('Back to selection')
309
- with gr.Group(elem_id="share-btn-container"):
310
- community_icon = gr.HTML(community_icon_html)
311
- loading_icon = gr.HTML(loading_icon_html)
312
- share_button = gr.Button(
313
- "Share to community", elem_id="share-btn")
314
- share_button.click(None, [], [], _js=share_js)
315
- with gr.Accordion('Advanced options',
316
- open=False,
317
- visible=SHOW_ADVANCED_OPTIONS):
318
- with gr.Tabs():
319
- with gr.Tab(label='Generation'):
320
- seed_1 = gr.Slider(label='Seed',
321
- minimum=0,
322
- maximum=MAX_SEED,
323
- step=1,
324
- value=0)
325
- randomize_seed_1 = gr.Checkbox(label='Randomize seed',
326
- value=True)
327
- guidance_scale_1 = gr.Slider(label='Guidance scale',
328
- minimum=1,
329
- maximum=20,
330
- step=0.1,
331
- value=7.0)
332
- custom_timesteps_1 = gr.Dropdown(
333
- label='Custom timesteps 1',
334
- choices=[
335
- 'none',
336
- 'fast27',
337
- 'smart27',
338
- 'smart50',
339
- 'smart100',
340
- 'smart185',
341
- ],
342
- value=DEFAULT_CUSTOM_TIMESTEPS_1,
343
- visible=SHOW_CUSTOM_TIMESTEPS_1)
344
- num_inference_steps_1 = gr.Slider(
345
- label='Number of inference steps',
346
- minimum=1,
347
- maximum=MAX_NUM_STEPS,
348
- step=1,
349
- value=100,
350
- visible=SHOW_NUM_STEPS_1)
351
- num_images = gr.Slider(label='Number of images',
352
- minimum=1,
353
- maximum=MAX_NUM_IMAGES,
354
- step=1,
355
- value=DEFAULT_NUM_IMAGES,
356
- visible=SHOW_NUM_IMAGES)
357
- with gr.Tab(label='Super-resolution 1'):
358
- seed_2 = gr.Slider(label='Seed',
359
- minimum=0,
360
- maximum=MAX_SEED,
361
- step=1,
362
- value=0)
363
- randomize_seed_2 = gr.Checkbox(label='Randomize seed',
364
- value=True)
365
- guidance_scale_2 = gr.Slider(label='Guidance scale',
366
- minimum=1,
367
- maximum=20,
368
- step=0.1,
369
- value=4.0)
370
- custom_timesteps_2 = gr.Dropdown(
371
- label='Custom timesteps 2',
372
- choices=[
373
- 'none',
374
- 'fast27',
375
- 'smart27',
376
- 'smart50',
377
- 'smart100',
378
- 'smart185',
379
- ],
380
- value=DEFAULT_CUSTOM_TIMESTEPS_2,
381
- visible=SHOW_CUSTOM_TIMESTEPS_2)
382
- num_inference_steps_2 = gr.Slider(
383
- label='Number of inference steps',
384
- minimum=1,
385
- maximum=MAX_NUM_STEPS,
386
- step=1,
387
- value=50,
388
- visible=SHOW_NUM_STEPS_2)
389
- with gr.Tab(label='Super-resolution 2'):
390
- seed_3 = gr.Slider(label='Seed',
391
- minimum=0,
392
- maximum=MAX_SEED,
393
- step=1,
394
- value=0)
395
- randomize_seed_3 = gr.Checkbox(label='Randomize seed',
396
- value=True)
397
- guidance_scale_3 = gr.Slider(label='Guidance scale',
398
- minimum=1,
399
- maximum=20,
400
- step=0.1,
401
- value=9.0)
402
- num_inference_steps_3 = gr.Slider(
403
- label='Number of inference steps',
404
- minimum=1,
405
- maximum=MAX_NUM_STEPS,
406
- step=1,
407
- value=DEFAULT_NUM_STEPS_3,
408
- visible=SHOW_NUM_STEPS_3)
409
-
410
- gr.Examples(examples=examples, inputs=prompt, examples_per_page=4)
411
-
412
- with gr.Box(visible=DEBUG):
413
- with gr.Row():
414
- with gr.Accordion(label='Hidden params'):
415
- stage1_param_path = gr.Text(label='Stage 1 param path')
416
- stage1_result_path = gr.Text(label='Stage 1 result path')
417
- stage1_param_file_hash_name = gr.Text(
418
- label='Stage 1 param file hash name')
419
- selected_index_for_stage2 = gr.Number(
420
- label='Selected index for Stage 2', value=-1, precision=0)
421
- gr.Markdown(DISCLAIMER)
422
- gr.HTML(FOOTER)
423
- stage1_inputs = [
424
- prompt,
425
- negative_prompt,
426
- seed_1,
427
- num_images,
428
- guidance_scale_1,
429
- custom_timesteps_1,
430
- num_inference_steps_1,
431
- ]
432
- stage1_outputs = [
433
- gallery,
434
- stage1_param_path,
435
- stage1_result_path,
436
- ]
437
-
438
- prompt.submit(
439
- fn=randomize_seed_fn,
440
- inputs=[seed_1, randomize_seed_1],
441
- outputs=seed_1,
442
- queue=False,
443
- ).then(
444
- fn=lambda: -1,
445
- outputs=selected_index_for_stage2,
446
- queue=False,
447
- ).then(
448
- fn=show_gallery_view,
449
- outputs=[
450
- gallery_view,
451
- upscale_view,
452
- ],
453
- queue=False,
454
- ).then(
455
- fn=update_upscale_button,
456
- inputs=selected_index_for_stage2,
457
- outputs=[
458
- upscale_button,
459
- upscale_to_256_button,
460
- ],
461
- queue=False,
462
- ).then(
463
- fn=model.run_stage1,
464
- inputs=stage1_inputs,
465
- outputs=stage1_outputs,
466
- ).success(
467
- fn=get_param_file_hash_name,
468
- inputs=stage1_param_path,
469
- outputs=stage1_param_file_hash_name,
470
- queue=False,
471
- ).then(
472
- fn=upload_stage1_result,
473
- inputs=[
474
- stage1_param_path,
475
- stage1_result_path,
476
- stage1_param_file_hash_name,
477
- ],
478
- queue=False,
479
- )
480
-
481
- negative_prompt.submit(
482
- fn=randomize_seed_fn,
483
- inputs=[seed_1, randomize_seed_1],
484
- outputs=seed_1,
485
- queue=False,
486
- ).then(
487
- fn=lambda: -1,
488
- outputs=selected_index_for_stage2,
489
- queue=False,
490
- ).then(
491
- fn=show_gallery_view,
492
- outputs=[
493
- gallery_view,
494
- upscale_view,
495
- ],
496
- queue=False,
497
- ).then(
498
- fn=update_upscale_button,
499
- inputs=selected_index_for_stage2,
500
- outputs=[
501
- upscale_button,
502
- upscale_to_256_button,
503
- ],
504
- queue=False,
505
- ).then(
506
- fn=model.run_stage1,
507
- inputs=stage1_inputs,
508
- outputs=stage1_outputs,
509
- ).success(
510
- fn=get_param_file_hash_name,
511
- inputs=stage1_param_path,
512
- outputs=stage1_param_file_hash_name,
513
- queue=False,
514
- ).then(
515
- fn=upload_stage1_result,
516
- inputs=[
517
- stage1_param_path,
518
- stage1_result_path,
519
- stage1_param_file_hash_name,
520
- ],
521
- queue=False,
522
- )
523
-
524
- generate_button.click(
525
- fn=randomize_seed_fn,
526
- inputs=[seed_1, randomize_seed_1],
527
- outputs=seed_1,
528
- queue=False,
529
- ).then(
530
- fn=lambda: -1,
531
- outputs=selected_index_for_stage2,
532
- queue=False,
533
- ).then(
534
- fn=show_gallery_view,
535
- outputs=[
536
- gallery_view,
537
- upscale_view,
538
- ],
539
- queue=False,
540
- ).then(
541
- fn=update_upscale_button,
542
- inputs=selected_index_for_stage2,
543
- outputs=[
544
- upscale_button,
545
- upscale_to_256_button,
546
- ],
547
- queue=False,
548
- ).then(
549
- fn=model.run_stage1,
550
- inputs=stage1_inputs,
551
- outputs=stage1_outputs,
552
- api_name='generate64',
553
- ).success(
554
- fn=get_param_file_hash_name,
555
- inputs=stage1_param_path,
556
- outputs=stage1_param_file_hash_name,
557
- queue=False,
558
- ).then(
559
- fn=upload_stage1_result,
560
- inputs=[
561
- stage1_param_path,
562
- stage1_result_path,
563
- stage1_param_file_hash_name,
564
- ],
565
- queue=False,
566
- )
567
-
568
- gallery.select(
569
- fn=get_stage2_index,
570
- outputs=selected_index_for_stage2,
571
- queue=False,
572
- )
573
-
574
- selected_index_for_stage2.change(
575
- fn=update_upscale_button,
576
- inputs=selected_index_for_stage2,
577
- outputs=[
578
- upscale_button,
579
- upscale_to_256_button,
580
- ],
581
- queue=False,
582
- )
583
-
584
- stage2_inputs = [
585
- stage1_result_path,
586
- selected_index_for_stage2,
587
- seed_2,
588
- guidance_scale_2,
589
- custom_timesteps_2,
590
- num_inference_steps_2,
591
- ]
592
-
593
- upscale_to_256_button.click(
594
- fn=check_if_stage2_selected,
595
- inputs=selected_index_for_stage2,
596
- queue=False,
597
- ).then(
598
- fn=randomize_seed_fn,
599
- inputs=[seed_2, randomize_seed_2],
600
- outputs=seed_2,
601
- queue=False,
602
- ).then(
603
- fn=show_upscaled_view,
604
- outputs=[
605
- gallery_view,
606
- upscale_view,
607
- ],
608
- queue=False,
609
- ).then(
610
- fn=model.run_stage2,
611
- inputs=stage2_inputs,
612
- outputs=result,
613
- api_name='upscale256',
614
- ).success(
615
- fn=upload_stage2_info,
616
- inputs=[
617
- stage1_param_file_hash_name,
618
- result,
619
- selected_index_for_stage2,
620
- seed_2,
621
- guidance_scale_2,
622
- custom_timesteps_2,
623
- num_inference_steps_2,
624
- ],
625
- queue=False,
626
- )
627
-
628
- stage2_3_inputs = [
629
- stage1_result_path,
630
- selected_index_for_stage2,
631
- seed_2,
632
- guidance_scale_2,
633
- custom_timesteps_2,
634
- num_inference_steps_2,
635
- prompt,
636
- negative_prompt,
637
- seed_3,
638
- guidance_scale_3,
639
- num_inference_steps_3,
640
- ]
641
-
642
- upscale_button.click(
643
- fn=check_if_stage2_selected,
644
- inputs=selected_index_for_stage2,
645
- queue=False,
646
- ).then(
647
- fn=randomize_seed_fn,
648
- inputs=[seed_2, randomize_seed_2],
649
- outputs=seed_2,
650
- queue=False,
651
- ).then(
652
- fn=randomize_seed_fn,
653
- inputs=[seed_3, randomize_seed_3],
654
- outputs=seed_3,
655
- queue=False,
656
- ).then(
657
- fn=show_upscaled_view,
658
- outputs=[
659
- gallery_view,
660
- upscale_view,
661
- ],
662
- queue=False,
663
- ).then(
664
- fn=model.run_stage2_3,
665
- inputs=stage2_3_inputs,
666
- outputs=result,
667
- api_name='upscale1024',
668
- ).success(
669
- fn=upload_stage2_3_info,
670
- inputs=[
671
- stage1_param_file_hash_name,
672
- result,
673
- selected_index_for_stage2,
674
- seed_2,
675
- guidance_scale_2,
676
- custom_timesteps_2,
677
- num_inference_steps_2,
678
- prompt,
679
- negative_prompt,
680
- seed_3,
681
- guidance_scale_3,
682
- num_inference_steps_3,
683
- ],
684
- queue=False,
685
- )
686
-
687
- back_to_selection_button.click(
688
- fn=show_gallery_view,
689
- outputs=[
690
- gallery_view,
691
- upscale_view,
692
- ],
693
- queue=False,
694
- )
695
-
696
- if UPLOAD_REPO_ID:
697
- scheduler = BackgroundScheduler()
698
- scheduler.add_job(func=upload_files, trigger="interval", seconds=60*20)
699
- scheduler.start()
700
-
701
- demo.queue(api_open=False, max_size=MAX_QUEUE_SIZE).launch(debug=DEBUG)
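The event wiring above chains each trigger through `.then()` and `.success()` so that seed randomization, view switching, generation, and result upload run as ordered steps. A minimal sketch of that chaining pattern, assuming Gradio 3.x (where `.success()` only fires if the preceding step did not raise); the function names below are stand-ins, not the app's real handlers:

import gradio as gr

def set_seed():
    # Stand-in for randomize_seed_fn above.
    return 42

def generate(seed):
    # Stand-in for model.run_stage1 above.
    return f'generated with seed {seed}'

with gr.Blocks() as demo:
    seed = gr.Number(value=0)
    out = gr.Textbox()
    btn = gr.Button('Generate')
    # Each .then() step starts after the previous one finishes;
    # .success() would additionally require the previous step not to error.
    btn.click(fn=set_seed, outputs=seed, queue=False).then(
        fn=generate, inputs=seed, outputs=out)

demo.launch()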
 
 
 
spaces/DemocracyStudio/generate_nft_content/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Generate Nft Content
- emoji: 🌖
- colorFrom: yellow
- colorTo: red
- sdk: streamlit
- sdk_version: 1.10.0
- app_file: app.py
- pinned: false
- license: cc
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
spaces/DenniSciFi/IconAutomation/app.py DELETED
@@ -1,136 +0,0 @@
- import gradio as gr
- import requests
-
- def get_organization_page(notion_url, notion_token, database_id):
-     """
-     Query the Notion database for the organization page with the specified URL.
-
-     :param notion_url: The URL of the organization page in the Notion database.
-     :param notion_token: The Notion API token.
-     :param database_id: The ID of the Notion database.
-     :return: The organization page object if found, otherwise None.
-     """
-
-     # Set headers for Notion API requests
-     headers = {
-         "Authorization": "Bearer " + notion_token,
-         "Notion-Version": "2022-06-28",
-         "Content-Type": "application/json"
-     }
-
-     # Construct the API endpoint URL using the provided database ID
-     url = f"https://api.notion.com/v1/databases/{database_id}/query"
-
-     # Define the query filter for the Notion API request
-     data = {
-         "filter": {
-             "property": "URL",
-             "url": {
-                 "equals": notion_url
-             }
-         }
-     }
-
-     # Send a POST request to the Notion API with the constructed URL, query filter, and headers
-     response = requests.post(url, json=data, headers=headers)
-     response_json = response.json()
-
-     results = response_json["results"]
-
-     # Return the first organization page object in the results array, or None if there are no results
-     if len(results) > 0:
-         return results[0]
-     else:
-         return None
-
-
- def update_opportunity_icon(opportunity_page_id, image_url, notion_token):
-     """
-     Update the icon of a specific opportunity page with an image URL.
-
-     :param opportunity_page_id: The ID of the opportunity page in the Notion database.
-     :param image_url: The URL of the new image to be set as the icon.
-     :param notion_token: The Notion API token.
-     :return: True if the update was successful, False otherwise.
-     """
-
-     # Set headers for Notion API requests
-     headers = {
-         "Authorization": "Bearer " + notion_token,
-         "Notion-Version": "2022-06-28",
-         "Content-Type": "application/json"
-     }
-
-     # Set the URL of the Notion API endpoint for updating the page with the given ID
-     url = f"https://api.notion.com/v1/pages/{opportunity_page_id}"
-
-     # Define the data to be sent in the request body, including the new image URL
-     data = {
-         "icon": {
-             "type": "external",
-             "external": {
-                 "url": image_url
-             }
-         }
-     }
-
-     # Send a PATCH request to the Notion API to update the page with the new image URL
-     response = requests.patch(url, json=data, headers=headers)
-
-     # Return True if the response status code indicates success, False otherwise
-     return response.status_code == 200
-
-
- def update_related_opportunities_icons(notion_url, image_url, notion_token, database_id):
-     """
-     Update the icons of all opportunities related to an organization page.
-
-     :param notion_url: The URL of the organization page in the Notion database.
-     :param image_url: The URL of the new image to be set as the icon.
-     :param notion_token: The Notion API token.
-     :param database_id: The ID of the Notion database.
-     """
-
-     # Get the organization page object from the Notion database using the URL
-     organization_page = get_organization_page(notion_url, notion_token, database_id)
-
-     # If the organization page is found
-     if organization_page:
-         # Extract the list of related opportunities from the page object
-         related_opportunities = organization_page["properties"]["Related opportunities"]["relation"]
-
-         # For each related opportunity
-         for opportunity in related_opportunities:
-             # Get the opportunity page ID
-             opportunity_page_id = opportunity["id"]
-
-             # Try to update the opportunity icon with the new image URL using the update_opportunity_icon() function
-             if update_opportunity_icon(opportunity_page_id, image_url, notion_token):
-                 print(f"Updated icon for opportunity page: {opportunity_page_id}")
-             else:
-                 print(f"Failed to update icon for opportunity page: {opportunity_page_id}")
-     else:
-         print("Organization page not found.")
-
-
- def interface(notion_url, image_url, notion_token, database_id):
-     # Calling the update_related_opportunities_icons function with the provided URLs and user inputs
-     update_related_opportunities_icons(notion_url, image_url, notion_token, database_id)
-     return "Operation Completed"
-
-
- iface = gr.Interface(
-     fn=interface,
-     inputs=[
-         gr.inputs.Textbox(label="Notion URL"),
-         gr.inputs.Textbox(label="Image URL"),
-         gr.inputs.Textbox(label="Notion Token"),
-         gr.inputs.Textbox(label="Database ID"),
-     ],
-     outputs=gr.outputs.Textbox(),
-     title="Icon Automation for Notion",
-     description="Enter the Notion URL of the organization page, the image URL you want to set as the icon, "
-                 "and your Notion API token along with the database ID. This will update the icons of all related opportunities.",
- )
-
- iface.launch()
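A hypothetical direct invocation of the helper above, outside the Gradio UI; every value below is a placeholder, not a real token, page, or database:

update_related_opportunities_icons(
    notion_url='https://www.notion.so/Example-Org-Page',                # placeholder page URL
    image_url='https://example.com/icon.png',                          # placeholder icon URL
    notion_token='secret_xxx',                                         # a Notion integration token
    database_id='00000000000000000000000000000000',                    # placeholder database ID
)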
 
 
 
spaces/DhruvShek/chatlm/app.py DELETED
@@ -1,171 +0,0 @@
-
- import streamlit as st
- from streamlit_chat import message
- import json
- import torch
- from torch.utils.data import Dataset
- import torch.utils.data
- from models import *
- from utils import *
- # Setting page title and header
- st.set_page_config(page_title="UniLM", page_icon=":robot_face:")
- st.markdown("<h1 style='text-align: center;'>UniLM</h1>", unsafe_allow_html=True)
-
-
-
- # Initialise session state variables
- if 'generated' not in st.session_state:
-     st.session_state['generated'] = []
- if 'past' not in st.session_state:
-     st.session_state['past'] = []
- if 'messages' not in st.session_state:
-     st.session_state['messages'] = [
-         {"role": "system", "content": "You are a helpful assistant."}
-     ]
- if 'model_name' not in st.session_state:
-     st.session_state['model_name'] = []
- if 'cost' not in st.session_state:
-     st.session_state['cost'] = []
- if 'total_tokens' not in st.session_state:
-     st.session_state['total_tokens'] = []
- if 'total_cost' not in st.session_state:
-     st.session_state['total_cost'] = 1
-
- # Sidebar - let user choose model, show total cost of current conversation, and let user clear the current conversation
- st.sidebar.title("Settings")
- model_name = st.sidebar.selectbox("Model:", ("30M_6.1K","NONE"))
- counter_placeholder = st.sidebar.empty()
-
- clear_button = st.sidebar.button("Clear Conversation", key="clear")
-
- # Map model names to OpenAI model IDs
- if model_name == "30M_6.1K":
-     model = "30M_6.1K"
- else:
-     model = "gpt-4"
-
- # reset everything
- if clear_button:
-     st.session_state['generated'] = []
-     st.session_state['past'] = []
-     st.session_state['messages'] = [
-         {"role": "system", "content": "You are a helpful assistant."}
-     ]
-     st.session_state['number_tokens'] = []
-     st.session_state['model_name'] = []
-     st.session_state['cost'] = []
-     st.session_state['total_cost'] = 0.0
-     st.session_state['total_tokens'] = []
-
-
-
- def evaluate(transformer, question, question_mask, max_len, word_map):
-     """
-     Performs Greedy Decoding with a batch size of 1
-     """
-     rev_word_map = {v: k for k, v in word_map.items()}
-     transformer.eval()
-     start_token = word_map['<start>']
-     encoded = transformer.encode(question, question_mask)
-     words = torch.LongTensor([[start_token]]).to(device)
-
-     for step in range(max_len - 1):
-         size = words.shape[1]
-         target_mask = torch.triu(torch.ones(size, size)).transpose(0, 1).type(dtype=torch.uint8)
-         target_mask = target_mask.to(device).unsqueeze(0).unsqueeze(0)
-         decoded = transformer.decode(words, target_mask, encoded, question_mask)
-         predictions = transformer.logit(decoded[:, -1])
-         _, next_word = torch.max(predictions, dim=1)
-         next_word = next_word.item()
-         if next_word == word_map['<end>']:
-             break
-         words = torch.cat([words, torch.LongTensor([[next_word]]).to(device)], dim=1)  # (1,step+2)
-
-     # Construct Sentence
-     if words.dim() == 2:
-         words = words.squeeze(0)
-         words = words.tolist()
-
-     sen_idx = [w for w in words if w not in {word_map['<start>']}]
-     sentence = ' '.join([rev_word_map[sen_idx[k]] for k in range(len(sen_idx))])
-
-     return sentence
- def remove_punc(string):
-     punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
-     no_punct = ""
-     for char in string:
-         if char not in punctuations:
-             no_punct = no_punct + char  # space is also a character
-     return no_punct.lower()
-
- if model_name == "30M_6.1K":
-     load_checkpoint = True
-     ckpt_path = 'checkpoint_190.pth.tar'
-     with open('WORDMAP_corpus.json', 'r') as j:
-         word_map = json.load(j)
-     if load_checkpoint:
-         checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))
-         transformer = checkpoint['transformer']
- else:
-     load_checkpoint = True
-     ckpt_path = 'checkpoint_190.pth.tar'
-     with open('WORDMAP_corpus.json', 'r') as j:
-         word_map = json.load(j)
-     if load_checkpoint:
-         checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))
-         transformer = checkpoint['transformer']
-
-
-
- # generate a response
- def generate_response(prompt):
-     st.session_state['messages'].append({"role": "user", "content": prompt})
-     question = remove_punc(prompt)
-
-     max_len = 153
-     enc_qus = [word_map.get(word, word_map['<unk>']) for word in question.split()]
-     question = torch.LongTensor(enc_qus).to(device).unsqueeze(0)
-     question_mask = (question != 0).to(device).unsqueeze(1).unsqueeze(1)
-     sentence = evaluate(transformer, question, question_mask, int(max_len), word_map)
-
-     response = sentence
-     st.session_state['messages'].append({"role": "assistant", "content": response})
-
-     # print(st.session_state['messages'])
-     total_tokens = "153"
-     prompt_tokens = "153"
-     completion_tokens = "153"
-     return response, total_tokens, prompt_tokens, completion_tokens
-
-
- # container for chat history
- response_container = st.container()
- # container for text box
- container = st.container()
-
- with container:
-     with st.form(key='my_form', clear_on_submit=True):
-         user_input = st.text_area("You:", key='input', height=2)
-         submit_button = st.form_submit_button(label='✉')
-
-     if submit_button and user_input:
-         output, total_tokens, prompt_tokens, completion_tokens = generate_response(user_input)
-         st.session_state['past'].append(user_input)
-         st.session_state['generated'].append(output)
-         st.session_state['model_name'].append(model_name)
-         st.session_state['total_tokens'].append(total_tokens)
-
-         # from https://openai.com/pricing#language-models
-         if model_name == "30M_6.1K":
-             cost = "1"
-         else:
-             cost = "2"
-
-
-
- if st.session_state['generated']:
-     with response_container:
-         for i in range(len(st.session_state['generated'])):
-             message(st.session_state["past"][i], is_user=True, key=str(i) + '_user')
-             message(st.session_state["generated"][i], key=str(i))
-
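`evaluate()` above decodes greedily and builds a causal mask so each step can only attend to tokens already generated. A small self-contained sketch of the same mask construction:

import torch

size = 4
# torch.triu gives an upper-triangular matrix; transposing it yields the
# lower-triangular causal mask that evaluate() feeds to the decoder.
mask = torch.triu(torch.ones(size, size)).transpose(0, 1).type(torch.uint8)
print(mask)
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]], dtype=torch.uint8)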
 
 
 
spaces/Didier/Semantic_Search_arXiv/README.md DELETED
@@ -1,37 +0,0 @@
- ---
- title: Semantic_Search_arXiv
- emoji: 👁
- colorFrom: yellow
- colorTo: blue
- sdk: streamlit
- app_file: app.py
- pinned: false
- ---
-
- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio`, `streamlit`, or `static`
-
- `sdk_version`: _string_
- Only applicable for `streamlit` SDK.
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
- Path is relative to the root of the repository.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
 
 
 
spaces/Dinoking/Guccio-AI-Designer/models/stylegan2/stylegan2-pytorch/op/fused_bias_act.cpp DELETED
@@ -1,21 +0,0 @@
- #include <torch/extension.h>
-
-
- torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
-                                 int act, int grad, float alpha, float scale);
-
- #define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor")
- #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
- #define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)
-
- torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,
-                              int act, int grad, float alpha, float scale) {
-     CHECK_CUDA(input);
-     CHECK_CUDA(bias);
-
-     return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);
- }
-
- PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
-     m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)");
- }
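The binding above exposes `fused_bias_act` to Python via pybind11. A sketch of how such an extension is typically JIT-compiled and called, assuming a companion CUDA source named `fused_bias_act_kernel.cu` (an assumption; the actual kernel file is not shown in this diff):

from torch.utils.cpp_extension import load

# JIT-compile the C++/CUDA sources into an importable Python module.
fused = load(
    name='fused',
    sources=['fused_bias_act.cpp', 'fused_bias_act_kernel.cu'],  # kernel filename is an assumption
    verbose=True,
)
# The bound function then mirrors the C++ signature shown above:
# out = fused.fused_bias_act(input, bias, refer, act, grad, alpha, scale)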
 
 
 
spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/filtered_lrelu.py DELETED
@@ -1,282 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- import os
- import numpy as np
- import torch
- import warnings
-
- from .. import custom_ops
- from .. import misc
- from . import upfirdn2d
- from . import bias_act
-
- #----------------------------------------------------------------------------
-
- _plugin = None
-
- def _init():
-     global _plugin
-     if _plugin is None:
-
-         # sources=['filtered_lrelu.h', 'filtered_lrelu.cu', 'filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu']
-         # sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
-         # try:
-         #     _plugin = custom_ops.get_plugin('filtered_lrelu_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math', '--allow-unsupported-compiler'])
-         # except:
-         #     warnings.warn('Failed to build CUDA kernels for filtered_lrelu_plugin. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
-
-         _plugin = custom_ops.get_plugin_v3(
-             module_name='filtered_lrelu_plugin',
-             sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'],
-             headers=['filtered_lrelu.h', 'filtered_lrelu.cu'],
-             source_dir=os.path.dirname(__file__),
-             extra_cuda_cflags=['--use_fast_math', '--allow-unsupported-compiler'],
-         )
-     return True
-
- def _get_filter_size(f):
-     if f is None:
-         return 1, 1
-     assert isinstance(f, torch.Tensor)
-     assert 1 <= f.ndim <= 2
-     return f.shape[-1], f.shape[0]  # width, height
-
- def _parse_padding(padding):
-     if isinstance(padding, int):
-         padding = [padding, padding]
-     assert isinstance(padding, (list, tuple))
-     assert all(isinstance(x, (int, np.integer)) for x in padding)
-     padding = [int(x) for x in padding]
-     if len(padding) == 2:
-         px, py = padding
-         padding = [px, px, py, py]
-     px0, px1, py0, py1 = padding
-     return px0, px1, py0, py1
-
- #----------------------------------------------------------------------------
-
- def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):
-     r"""Filtered leaky ReLU for a batch of 2D images.
-
-     Performs the following sequence of operations for each channel:
-
-     1. Add channel-specific bias if provided (`b`).
-
-     2. Upsample the image by inserting N-1 zeros after each pixel (`up`).
-
-     3. Pad the image with the specified number of zeros on each side (`padding`).
-        Negative padding corresponds to cropping the image.
-
-     4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it
-        so that the footprint of all output pixels lies within the input image.
-
-     5. Multiply each value by the provided gain factor (`gain`).
-
-     6. Apply leaky ReLU activation function to each value.
-
-     7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided.
-
-     8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking
-        it so that the footprint of all output pixels lies within the input image.
-
-     9. Downsample the image by keeping every Nth pixel (`down`).
-
-     The fused op is considerably more efficient than performing the same calculation
-     using standard PyTorch ops. It supports gradients of arbitrary order.
-
-     Args:
-         x: Float32/float16/float64 input tensor of the shape
-             `[batch_size, num_channels, in_height, in_width]`.
-         fu: Float32 upsampling FIR filter of the shape
-             `[filter_height, filter_width]` (non-separable),
-             `[filter_taps]` (separable), or
-             `None` (identity).
-         fd: Float32 downsampling FIR filter of the shape
-             `[filter_height, filter_width]` (non-separable),
-             `[filter_taps]` (separable), or
-             `None` (identity).
-         b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
-             as `x`. The length of the vector must match the channel dimension of `x`.
-         up: Integer upsampling factor (default: 1).
-         down: Integer downsampling factor (default: 1).
-         padding: Padding with respect to the upsampled image. Can be a single number
-             or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
-             (default: 0).
-         gain: Overall scaling factor for signal magnitude (default: sqrt(2)).
-         slope: Slope on the negative side of leaky ReLU (default: 0.2).
-         clamp: Maximum magnitude for leaky ReLU output (default: None).
-         flip_filter: False = convolution, True = correlation (default: False).
-         impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
-
-     Returns:
-         Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
-     """
-     assert isinstance(x, torch.Tensor)
-     assert impl in ['ref', 'cuda']
-     if impl == 'cuda' and x.device.type == 'cuda' and _init():
-         return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0)
-     return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
-
- #----------------------------------------------------------------------------
-
- @misc.profiled_function
- def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
-     """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using
-     existing `upfirdn2d()` and `bias_act()` ops.
-     """
-     assert isinstance(x, torch.Tensor) and x.ndim == 4
-     fu_w, fu_h = _get_filter_size(fu)
-     fd_w, fd_h = _get_filter_size(fd)
-     if b is not None:
-         assert isinstance(b, torch.Tensor) and b.dtype == x.dtype
-         misc.assert_shape(b, [x.shape[1]])
-     assert isinstance(up, int) and up >= 1
-     assert isinstance(down, int) and down >= 1
-     px0, px1, py0, py1 = _parse_padding(padding)
-     assert gain == float(gain) and gain > 0
-     assert slope == float(slope) and slope >= 0
-     assert clamp is None or (clamp == float(clamp) and clamp >= 0)
-
-     # Calculate output size.
-     batch_size, channels, in_h, in_w = x.shape
-     in_dtype = x.dtype
-     out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down
-     out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down
-
-     # Compute using existing ops.
-     x = bias_act.bias_act(x=x, b=b)  # Apply bias.
-     x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter)  # Upsample.
-     x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp)  # Bias, leaky ReLU, clamp.
-     x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter)  # Downsample.
-
-     # Check output shape & dtype.
-     misc.assert_shape(x, [batch_size, channels, out_h, out_w])
-     assert x.dtype == in_dtype
-     return x
-
- #----------------------------------------------------------------------------
-
- _filtered_lrelu_cuda_cache = dict()
-
- def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
-     """Fast CUDA implementation of `filtered_lrelu()` using custom ops.
-     """
-     assert isinstance(up, int) and up >= 1
-     assert isinstance(down, int) and down >= 1
-     px0, px1, py0, py1 = _parse_padding(padding)
-     assert gain == float(gain) and gain > 0
-     gain = float(gain)
-     assert slope == float(slope) and slope >= 0
-     slope = float(slope)
-     assert clamp is None or (clamp == float(clamp) and clamp >= 0)
-     clamp = float(clamp if clamp is not None else 'inf')
-
-     # Lookup from cache.
-     key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter)
-     if key in _filtered_lrelu_cuda_cache:
-         return _filtered_lrelu_cuda_cache[key]
-
-     # Forward op.
-     class FilteredLReluCuda(torch.autograd.Function):
-         @staticmethod
-         def forward(ctx, x, fu, fd, b, si, sx, sy):  # pylint: disable=arguments-differ
-             assert isinstance(x, torch.Tensor) and x.ndim == 4
-
-             # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable).
-             if fu is None:
-                 fu = torch.ones([1, 1], dtype=torch.float32, device=x.device)
-             if fd is None:
-                 fd = torch.ones([1, 1], dtype=torch.float32, device=x.device)
-             assert 1 <= fu.ndim <= 2
-             assert 1 <= fd.ndim <= 2
-
-             # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1.
-             if up == 1 and fu.ndim == 1 and fu.shape[0] == 1:
-                 fu = fu.square()[None]
-             if down == 1 and fd.ndim == 1 and fd.shape[0] == 1:
-                 fd = fd.square()[None]
-
-             # Missing sign input tensor.
-             if si is None:
-                 si = torch.empty([0])
-
-             # Missing bias tensor.
-             if b is None:
-                 b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device)
-
-             # Construct internal sign tensor only if gradients are needed.
-             write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad)
-
-             # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout.
-             strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1]
-             if any(a < b for a, b in zip(strides[:-1], strides[1:])):
-                 warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning)
-
-             # Call C++/Cuda plugin if datatype is supported.
-             if x.dtype in [torch.float16, torch.float32]:
-                 if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device):
-                     warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning)
-                 y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs)
-             else:
-                 return_code = -1
-
-             # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because
-             # only the bit-packed sign tensor is retained for gradient computation.
-             if return_code < 0:
-                 warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning)
-
-                 y = x.add(b.unsqueeze(-1).unsqueeze(-1))  # Add bias.
-                 y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter)  # Upsample.
-                 so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs)  # Activation function and sign handling. Modifies y in-place.
-                 y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter)  # Downsample.
-
-             # Prepare for gradient computation.
-             ctx.save_for_backward(fu, fd, (si if si.numel() else so))
-             ctx.x_shape = x.shape
-             ctx.y_shape = y.shape
-             ctx.s_ofs = sx, sy
-             return y
-
-         @staticmethod
-         def backward(ctx, dy):  # pylint: disable=arguments-differ
-             fu, fd, si = ctx.saved_tensors
-             _, _, xh, xw = ctx.x_shape
-             _, _, yh, yw = ctx.y_shape
-             sx, sy = ctx.s_ofs
-             dx = None  # 0
-             dfu = None; assert not ctx.needs_input_grad[1]
-             dfd = None; assert not ctx.needs_input_grad[2]
-             db = None  # 3
-             dsi = None; assert not ctx.needs_input_grad[4]
-             dsx = None; assert not ctx.needs_input_grad[5]
-             dsy = None; assert not ctx.needs_input_grad[6]
-
-             if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]:
-                 pp = [
-                     (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0,
-                     xw * up - yw * down + px0 - (up - 1),
-                     (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0,
-                     xh * up - yh * down + py0 - (up - 1),
-                 ]
-                 gg = gain * (up ** 2) / (down ** 2)
-                 ff = (not flip_filter)
-                 sx = sx - (fu.shape[-1] - 1) + px0
-                 sy = sy - (fu.shape[0] - 1) + py0
-                 dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy)
-
-             if ctx.needs_input_grad[3]:
-                 db = dx.sum([0, 2, 3])
-
-             return dx, dfu, dfd, db, dsi, dsx, dsy
-
-     # Add to cache.
-     _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda
-     return FilteredLReluCuda
-
- #----------------------------------------------------------------------------
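The output-size formula in `_filtered_lrelu_ref` can be checked by hand. A worked example with illustrative numbers (2x upsampling, 2x downsampling, 6-tap filters, padding of 2 on each side; all values chosen for the example, not taken from any real config):

# Worked example of the out_w formula from _filtered_lrelu_ref above.
in_w, up, down = 16, 2, 2
px0, px1 = 2, 2
fu_w, fd_w = 6, 6
out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down
print(out_w)  # (32 + 4 - 5 - 5 + 1) // 2 = 13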
 
 
 
spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/src/utils.cpp DELETED
@@ -1,429 +0,0 @@
1
- #include "BYTETracker.h"
2
- #include "lapjv.h"
3
-
4
- vector<STrack*> BYTETracker::joint_stracks(vector<STrack*> &tlista, vector<STrack> &tlistb)
5
- {
6
- map<int, int> exists;
7
- vector<STrack*> res;
8
- for (int i = 0; i < tlista.size(); i++)
9
- {
10
- exists.insert(pair<int, int>(tlista[i]->track_id, 1));
11
- res.push_back(tlista[i]);
12
- }
13
- for (int i = 0; i < tlistb.size(); i++)
14
- {
15
- int tid = tlistb[i].track_id;
16
- if (!exists[tid] || exists.count(tid) == 0)
17
- {
18
- exists[tid] = 1;
19
- res.push_back(&tlistb[i]);
20
- }
21
- }
22
- return res;
23
- }
24
-
25
- vector<STrack> BYTETracker::joint_stracks(vector<STrack> &tlista, vector<STrack> &tlistb)
26
- {
27
- map<int, int> exists;
28
- vector<STrack> res;
29
- for (int i = 0; i < tlista.size(); i++)
30
- {
31
- exists.insert(pair<int, int>(tlista[i].track_id, 1));
32
- res.push_back(tlista[i]);
33
- }
34
- for (int i = 0; i < tlistb.size(); i++)
35
- {
36
- int tid = tlistb[i].track_id;
37
- if (!exists[tid] || exists.count(tid) == 0)
38
- {
39
- exists[tid] = 1;
40
- res.push_back(tlistb[i]);
41
- }
42
- }
43
- return res;
44
- }
45
-
46
- vector<STrack> BYTETracker::sub_stracks(vector<STrack> &tlista, vector<STrack> &tlistb)
47
- {
48
- map<int, STrack> stracks;
49
- for (int i = 0; i < tlista.size(); i++)
50
- {
51
- stracks.insert(pair<int, STrack>(tlista[i].track_id, tlista[i]));
52
- }
53
- for (int i = 0; i < tlistb.size(); i++)
54
- {
55
- int tid = tlistb[i].track_id;
56
- if (stracks.count(tid) != 0)
57
- {
58
- stracks.erase(tid);
59
- }
60
- }
61
-
62
- vector<STrack> res;
63
- std::map<int, STrack>::iterator it;
64
- for (it = stracks.begin(); it != stracks.end(); ++it)
65
- {
66
- res.push_back(it->second);
67
- }
68
-
69
- return res;
70
- }
71
-
72
- void BYTETracker::remove_duplicate_stracks(vector<STrack> &resa, vector<STrack> &resb, vector<STrack> &stracksa, vector<STrack> &stracksb)
73
- {
74
- vector<vector<float> > pdist = iou_distance(stracksa, stracksb);
75
- vector<pair<int, int> > pairs;
76
- for (int i = 0; i < pdist.size(); i++)
77
- {
78
- for (int j = 0; j < pdist[i].size(); j++)
79
- {
80
- if (pdist[i][j] < 0.15)
81
- {
82
- pairs.push_back(pair<int, int>(i, j));
83
- }
84
- }
85
- }
86
-
87
- vector<int> dupa, dupb;
88
- for (int i = 0; i < pairs.size(); i++)
89
- {
90
- int timep = stracksa[pairs[i].first].frame_id - stracksa[pairs[i].first].start_frame;
91
- int timeq = stracksb[pairs[i].second].frame_id - stracksb[pairs[i].second].start_frame;
92
- if (timep > timeq)
93
- dupb.push_back(pairs[i].second);
94
- else
95
- dupa.push_back(pairs[i].first);
96
- }
97
-
98
- for (int i = 0; i < stracksa.size(); i++)
99
- {
100
- vector<int>::iterator iter = find(dupa.begin(), dupa.end(), i);
101
- if (iter == dupa.end())
102
- {
103
- resa.push_back(stracksa[i]);
104
- }
105
- }
106
-
107
- for (int i = 0; i < stracksb.size(); i++)
108
- {
109
- vector<int>::iterator iter = find(dupb.begin(), dupb.end(), i);
110
- if (iter == dupb.end())
111
- {
112
- resb.push_back(stracksb[i]);
113
- }
114
- }
115
- }
116
-
117
- void BYTETracker::linear_assignment(vector<vector<float> > &cost_matrix, int cost_matrix_size, int cost_matrix_size_size, float thresh,
118
- vector<vector<int> > &matches, vector<int> &unmatched_a, vector<int> &unmatched_b)
119
- {
120
- if (cost_matrix.size() == 0)
121
- {
122
- for (int i = 0; i < cost_matrix_size; i++)
123
- {
124
- unmatched_a.push_back(i);
125
- }
126
- for (int i = 0; i < cost_matrix_size_size; i++)
127
- {
128
- unmatched_b.push_back(i);
129
- }
130
- return;
131
- }
132
-
133
- vector<int> rowsol; vector<int> colsol;
134
- float c = lapjv(cost_matrix, rowsol, colsol, true, thresh);
135
- for (int i = 0; i < rowsol.size(); i++)
136
- {
137
- if (rowsol[i] >= 0)
138
- {
139
- vector<int> match;
140
- match.push_back(i);
141
- match.push_back(rowsol[i]);
142
- matches.push_back(match);
143
- }
144
- else
145
- {
146
- unmatched_a.push_back(i);
147
- }
148
- }
149
-
150
- for (int i = 0; i < colsol.size(); i++)
151
- {
152
- if (colsol[i] < 0)
153
- {
154
- unmatched_b.push_back(i);
155
- }
156
- }
157
- }
158
-
159
- vector<vector<float> > BYTETracker::ious(vector<vector<float> > &atlbrs, vector<vector<float> > &btlbrs)
160
- {
161
- vector<vector<float> > ious;
162
- if (atlbrs.size()*btlbrs.size() == 0)
163
- return ious;
164
-
165
- ious.resize(atlbrs.size());
166
- for (int i = 0; i < ious.size(); i++)
167
- {
168
- ious[i].resize(btlbrs.size());
169
- }
170
-
171
- //bbox_ious
172
- for (int k = 0; k < btlbrs.size(); k++)
173
- {
174
- vector<float> ious_tmp;
175
- float box_area = (btlbrs[k][2] - btlbrs[k][0] + 1)*(btlbrs[k][3] - btlbrs[k][1] + 1);
176
- for (int n = 0; n < atlbrs.size(); n++)
177
- {
178
- float iw = min(atlbrs[n][2], btlbrs[k][2]) - max(atlbrs[n][0], btlbrs[k][0]) + 1;
179
- if (iw > 0)
180
- {
181
- float ih = min(atlbrs[n][3], btlbrs[k][3]) - max(atlbrs[n][1], btlbrs[k][1]) + 1;
182
- if(ih > 0)
183
- {
184
- float ua = (atlbrs[n][2] - atlbrs[n][0] + 1)*(atlbrs[n][3] - atlbrs[n][1] + 1) + box_area - iw * ih;
185
- ious[n][k] = iw * ih / ua;
186
- }
187
- else
188
- {
189
- ious[n][k] = 0.0;
190
- }
191
- }
192
- else
193
- {
194
- ious[n][k] = 0.0;
195
- }
196
- }
197
- }
198
-
199
- return ious;
200
- }
201
-
202
- vector<vector<float> > BYTETracker::iou_distance(vector<STrack*> &atracks, vector<STrack> &btracks, int &dist_size, int &dist_size_size)
- {
- 	vector<vector<float> > cost_matrix;
- 	if (atracks.size() * btracks.size() == 0)
- 	{
- 		dist_size = atracks.size();
- 		dist_size_size = btracks.size();
- 		return cost_matrix;
- 	}
- 	vector<vector<float> > atlbrs, btlbrs;
- 	for (int i = 0; i < atracks.size(); i++)
- 	{
- 		atlbrs.push_back(atracks[i]->tlbr);
- 	}
- 	for (int i = 0; i < btracks.size(); i++)
- 	{
- 		btlbrs.push_back(btracks[i].tlbr);
- 	}
-
- 	dist_size = atracks.size();
- 	dist_size_size = btracks.size();
-
- 	vector<vector<float> > _ious = ious(atlbrs, btlbrs);
-
- 	for (int i = 0; i < _ious.size(); i++)
- 	{
- 		vector<float> _iou;
- 		for (int j = 0; j < _ious[i].size(); j++)
- 		{
- 			_iou.push_back(1 - _ious[i][j]);
- 		}
- 		cost_matrix.push_back(_iou);
- 	}
-
- 	return cost_matrix;
- }
-
- vector<vector<float> > BYTETracker::iou_distance(vector<STrack> &atracks, vector<STrack> &btracks)
- {
- 	vector<vector<float> > atlbrs, btlbrs;
- 	for (int i = 0; i < atracks.size(); i++)
- 	{
- 		atlbrs.push_back(atracks[i].tlbr);
- 	}
- 	for (int i = 0; i < btracks.size(); i++)
- 	{
- 		btlbrs.push_back(btracks[i].tlbr);
- 	}
-
- 	vector<vector<float> > _ious = ious(atlbrs, btlbrs);
- 	vector<vector<float> > cost_matrix;
- 	for (int i = 0; i < _ious.size(); i++)
- 	{
- 		vector<float> _iou;
- 		for (int j = 0; j < _ious[i].size(); j++)
- 		{
- 			_iou.push_back(1 - _ious[i][j]);
- 		}
- 		cost_matrix.push_back(_iou);
- 	}
-
- 	return cost_matrix;
- }
-
- double BYTETracker::lapjv(const vector<vector<float> > &cost, vector<int> &rowsol, vector<int> &colsol,
- 	bool extend_cost, float cost_limit, bool return_cost)
- {
- 	vector<vector<float> > cost_c;
- 	cost_c.assign(cost.begin(), cost.end());
-
- 	vector<vector<float> > cost_c_extended;
-
- 	int n_rows = cost.size();
- 	int n_cols = cost[0].size();
- 	rowsol.resize(n_rows);
- 	colsol.resize(n_cols);
-
- 	int n = 0;
- 	if (n_rows == n_cols)
- 	{
- 		n = n_rows;
- 	}
- 	else
- 	{
- 		if (!extend_cost)
- 		{
- 			cout << "set extend_cost=True" << endl;
- 			system("pause");
- 			exit(0);
- 		}
- 	}
-
- 	if (extend_cost || cost_limit < LONG_MAX)
- 	{
- 		// Pad to a (n_rows + n_cols) square matrix so rows/columns may stay unassigned.
- 		n = n_rows + n_cols;
- 		cost_c_extended.resize(n);
- 		for (int i = 0; i < cost_c_extended.size(); i++)
- 			cost_c_extended[i].resize(n);
-
- 		if (cost_limit < LONG_MAX)
- 		{
- 			for (int i = 0; i < cost_c_extended.size(); i++)
- 			{
- 				for (int j = 0; j < cost_c_extended[i].size(); j++)
- 				{
- 					cost_c_extended[i][j] = cost_limit / 2.0;
- 				}
- 			}
- 		}
- 		else
- 		{
- 			float cost_max = -1;
- 			for (int i = 0; i < cost_c.size(); i++)
- 			{
- 				for (int j = 0; j < cost_c[i].size(); j++)
- 				{
- 					if (cost_c[i][j] > cost_max)
- 						cost_max = cost_c[i][j];
- 				}
- 			}
- 			for (int i = 0; i < cost_c_extended.size(); i++)
- 			{
- 				for (int j = 0; j < cost_c_extended[i].size(); j++)
- 				{
- 					cost_c_extended[i][j] = cost_max + 1;
- 				}
- 			}
- 		}
-
- 		// Dummy-to-dummy block is free; real costs go in the top-left block.
- 		for (int i = n_rows; i < cost_c_extended.size(); i++)
- 		{
- 			for (int j = n_cols; j < cost_c_extended[i].size(); j++)
- 			{
- 				cost_c_extended[i][j] = 0;
- 			}
- 		}
- 		for (int i = 0; i < n_rows; i++)
- 		{
- 			for (int j = 0; j < n_cols; j++)
- 			{
- 				cost_c_extended[i][j] = cost_c[i][j];
- 			}
- 		}
-
- 		cost_c.clear();
- 		cost_c.assign(cost_c_extended.begin(), cost_c_extended.end());
- 	}
-
- 	// Copy into the n x n buffers expected by the solver.
- 	double **cost_ptr;
- 	cost_ptr = new double *[n];
- 	for (int i = 0; i < n; i++)
- 		cost_ptr[i] = new double[n];
-
- 	for (int i = 0; i < n; i++)
- 	{
- 		for (int j = 0; j < n; j++)
- 		{
- 			cost_ptr[i][j] = cost_c[i][j];
- 		}
- 	}
-
- 	int *x_c = new int[n];
- 	int *y_c = new int[n];
-
- 	int ret = lapjv_internal(n, cost_ptr, x_c, y_c);
- 	if (ret != 0)
- 	{
- 		cout << "Calculation failed!" << endl;
- 		system("pause");
- 		exit(0);
- 	}
-
- 	double opt = 0.0;
-
- 	if (n != n_rows)
- 	{
- 		// Map assignments to dummy rows/columns back to "unmatched" (-1).
- 		for (int i = 0; i < n; i++)
- 		{
- 			if (x_c[i] >= n_cols)
- 				x_c[i] = -1;
- 			if (y_c[i] >= n_rows)
- 				y_c[i] = -1;
- 		}
- 		for (int i = 0; i < n_rows; i++)
- 		{
- 			rowsol[i] = x_c[i];
- 		}
- 		for (int i = 0; i < n_cols; i++)
- 		{
- 			colsol[i] = y_c[i];
- 		}
-
- 		if (return_cost)
- 		{
- 			for (int i = 0; i < rowsol.size(); i++)
- 			{
- 				if (rowsol[i] != -1)
- 				{
- 					//cout << i << "\t" << rowsol[i] << "\t" << cost_ptr[i][rowsol[i]] << endl;
- 					opt += cost_ptr[i][rowsol[i]];
- 				}
- 			}
- 		}
- 	}
- 	else if (return_cost)
- 	{
- 		for (int i = 0; i < rowsol.size(); i++)
- 		{
- 			opt += cost_ptr[i][rowsol[i]];
- 		}
- 	}
-
- 	for (int i = 0; i < n; i++)
- 	{
- 		delete[] cost_ptr[i];
- 	}
- 	delete[] cost_ptr;
- 	delete[] x_c;
- 	delete[] y_c;
-
- 	return opt;
- }
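The padding logic in lapjv above is the standard trick for thresholded assignment (the same thing the Python side gets from lap.lapjv(..., extend_cost=True, cost_limit=thresh)). A NumPy sketch of just the matrix construction, mirroring the loops above:

```python
import numpy as np

def extend_cost_matrix(cost: np.ndarray, cost_limit: float) -> np.ndarray:
    """Pad an (r, c) cost matrix to (r + c, r + c) so any row or column may stay
    unassigned: a rejected pair consumes two dummy cells costing cost_limit / 2 each."""
    r, c = cost.shape
    n = r + c
    ext = np.full((n, n), cost_limit / 2.0, dtype=np.float64)
    ext[r:, c:] = 0.0   # dummy-to-dummy assignments are free
    ext[:r, :c] = cost  # real costs in the top-left block
    return ext
```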
- Scalar BYTETracker::get_color(int idx)
- {
- 	idx += 3;
- 	return Scalar(37 * idx % 255, 17 * idx % 255, 29 * idx % 255);
- }
spaces/ECCV2022/bytetrack/exps/default/yolov3.py DELETED
@@ -1,89 +0,0 @@
- #!/usr/bin/env python3
- # -*- coding:utf-8 -*-
- # Copyright (c) Megvii, Inc. and its affiliates.
-
- import os
- import torch
- import torch.nn as nn
-
- from yolox.exp import Exp as MyExp
-
-
- class Exp(MyExp):
-     def __init__(self):
-         super(Exp, self).__init__()
-         self.depth = 1.0
-         self.width = 1.0
-         self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
-
-     def get_model(self, sublinear=False):
-         def init_yolo(M):
-             for m in M.modules():
-                 if isinstance(m, nn.BatchNorm2d):
-                     m.eps = 1e-3
-                     m.momentum = 0.03
-         if "model" not in self.__dict__:
-             from yolox.models import YOLOX, YOLOFPN, YOLOXHead
-             backbone = YOLOFPN()
-             head = YOLOXHead(self.num_classes, self.width, in_channels=[128, 256, 512], act="lrelu")
-             self.model = YOLOX(backbone, head)
-         self.model.apply(init_yolo)
-         self.model.head.initialize_biases(1e-2)
-
-         return self.model
-
-     def get_data_loader(self, batch_size, is_distributed, no_aug=False):
-         from data.datasets.cocodataset import COCODataset
-         from data.datasets.mosaicdetection import MosaicDetection
-         from data.datasets.data_augment import TrainTransform
-         from data.datasets.dataloading import YoloBatchSampler, DataLoader, InfiniteSampler
-         import torch.distributed as dist
-
-         dataset = COCODataset(
-             data_dir='data/COCO/',
-             json_file=self.train_ann,
-             img_size=self.input_size,
-             preproc=TrainTransform(
-                 rgb_means=(0.485, 0.456, 0.406),
-                 std=(0.229, 0.224, 0.225),
-                 max_labels=50
-             ),
-         )
-
-         dataset = MosaicDetection(
-             dataset,
-             mosaic=not no_aug,
-             img_size=self.input_size,
-             preproc=TrainTransform(
-                 rgb_means=(0.485, 0.456, 0.406),
-                 std=(0.229, 0.224, 0.225),
-                 max_labels=120
-             ),
-             degrees=self.degrees,
-             translate=self.translate,
-             scale=self.scale,
-             shear=self.shear,
-             perspective=self.perspective,
-         )
-
-         self.dataset = dataset
-
-         if is_distributed:
-             batch_size = batch_size // dist.get_world_size()
-             sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0)
-         else:
-             sampler = torch.utils.data.RandomSampler(self.dataset)
-
-         batch_sampler = YoloBatchSampler(
-             sampler=sampler,
-             batch_size=batch_size,
-             drop_last=False,
-             input_dimension=self.input_size,
-             mosaic=not no_aug
-         )
-
-         dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
-         dataloader_kwargs["batch_sampler"] = batch_sampler
-         train_loader = DataLoader(self.dataset, **dataloader_kwargs)
-
-         return train_loader
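A hypothetical driver for this experiment definition (the import path assumes the ByteTrack repo root is on PYTHONPATH; none of this is in the repo itself):

```python
from exps.default.yolov3 import Exp  # hypothetical: run from the repo root

exp = Exp()
model = exp.get_model()  # YOLOX detector with a Darknet-53 YOLOFPN backbone
model.eval()
print(exp.exp_name)      # "yolov3", derived from this file's name
```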
spaces/ECCV2022/bytetrack/tutorials/centertrack/mot_online/basetrack.py DELETED
@@ -1,52 +0,0 @@
- import numpy as np
- from collections import OrderedDict
-
-
- class TrackState(object):
-     New = 0
-     Tracked = 1
-     Lost = 2
-     Removed = 3
-
-
- class BaseTrack(object):
-     _count = 0
-
-     track_id = 0
-     is_activated = False
-     state = TrackState.New
-
-     history = OrderedDict()
-     features = []
-     curr_feature = None
-     score = 0
-     start_frame = 0
-     frame_id = 0
-     time_since_update = 0
-
-     # multi-camera
-     location = (np.inf, np.inf)
-
-     @property
-     def end_frame(self):
-         return self.frame_id
-
-     @staticmethod
-     def next_id():
-         BaseTrack._count += 1
-         return BaseTrack._count
-
-     def activate(self, *args):
-         raise NotImplementedError
-
-     def predict(self):
-         raise NotImplementedError
-
-     def update(self, *args, **kwargs):
-         raise NotImplementedError
-
-     def mark_lost(self):
-         self.state = TrackState.Lost
-
-     def mark_removed(self):
-         self.state = TrackState.Removed
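BaseTrack is an abstract state holder; concrete trackers supply the lifecycle methods. A minimal hypothetical subclass, just to show the contract (real trackers such as ByteTrack's STrack add a Kalman filter on top):

```python
class SimpleTrack(BaseTrack):
    def __init__(self, tlbr, score):
        self.tlbr = tlbr
        self.score = score

    def activate(self, frame_id):
        self.track_id = self.next_id()
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = self.start_frame = frame_id

    def predict(self):
        pass  # no motion model in this sketch

    def update(self, tlbr, score, frame_id):
        self.tlbr, self.score, self.frame_id = tlbr, score, frame_id
        self.state = TrackState.Tracked
```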
spaces/Eddycrack864/Applio-Inference/utils/dependency.py DELETED
@@ -1,170 +0,0 @@
- import os
- import csv
- import shutil
- import tarfile
- import subprocess
- from pathlib import Path
- from datetime import datetime
-
- def install_packages_but_jank_af():
-     packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2']
-     pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0',
-                     'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5',
-                     'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12',
-                     'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1',
-                     'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv', 'av']
-
-     print("Updating and installing system packages...")
-     for package in packages:
-         print(f"Installing {package}...")
-         subprocess.check_call(['apt-get', 'install', '-qq', '-y', package])
-
-     print("Updating and installing pip packages...")
-     subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages)
-
-     print('Packages up to date.')
-
-
- def setup_environment(ForceUpdateDependencies, ForceTemporaryStorage):
-     # Mount Google Drive
-     if not ForceTemporaryStorage:
-         from google.colab import drive
-
-         if not os.path.exists('/content/drive'):
-             drive.mount('/content/drive')
-         else:
-             print('Drive is already mounted. Proceeding...')
-
-     # Install dependencies, reporting progress as it goes
-     def install_packages():
-         packages = ['build-essential', 'python3-dev', 'ffmpeg', 'aria2']
-         pip_packages = ['pip', 'setuptools', 'wheel', 'httpx==0.23.0', 'faiss-gpu', 'fairseq', 'gradio==3.34.0',
-                         'ffmpeg', 'ffmpeg-python', 'praat-parselmouth', 'pyworld', 'numpy==1.23.5',
-                         'numba==0.56.4', 'librosa==0.9.2', 'mega.py', 'gdown', 'onnxruntime', 'pyngrok==4.1.12',
-                         'gTTS', 'elevenlabs', 'wget', 'tensorboardX', 'unidecode', 'huggingface-hub', 'stftpitchshift==1.5.1',
-                         'yt-dlp', 'pedalboard', 'pathvalidate', 'nltk', 'edge-tts', 'git+https://github.com/suno-ai/bark.git', 'python-dotenv', 'av']
-
-         print("Updating and installing system packages...")
-         for package in packages:
-             print(f"Installing {package}...")
-             subprocess.check_call(['apt-get', 'install', '-qq', '-y', package])
-
-         print("Updating and installing pip packages...")
-         subprocess.check_call(['pip', 'install', '--upgrade'] + pip_packages)
-
-         print('Packages up to date.')
-
-     # Scan a directory and record filenames with modification timestamps
-     def scan_and_write(base_path, output_file):
-         with open(output_file, 'w', newline='') as f:
-             writer = csv.writer(f)
-             for dirpath, dirs, files in os.walk(base_path):
-                 for filename in files:
-                     fname = os.path.join(dirpath, filename)
-                     try:
-                         mtime = os.path.getmtime(fname)
-                         writer.writerow([fname, mtime])
-                     except Exception as e:
-                         print(f'Skipping unreadable or missing file {fname}: {str(e)}')
-         print(f'Finished recording filesystem timestamps to {output_file}.')
-
-     # Compare two timestamp indexes and report what changed
-     def compare_files(old_file, new_file):
-         old_files = {}
-         new_files = {}
-
-         with open(old_file, 'r') as f:
-             reader = csv.reader(f)
-             old_files = {rows[0]: rows[1] for rows in reader}
-
-         with open(new_file, 'r') as f:
-             reader = csv.reader(f)
-             new_files = {rows[0]: rows[1] for rows in reader}
-
-         removed_files = old_files.keys() - new_files.keys()
-         added_files = new_files.keys() - old_files.keys()
-         unchanged_files = old_files.keys() & new_files.keys()
-
-         changed_files = {f for f in unchanged_files if old_files[f] != new_files[f]}
-
-         for file in removed_files:
-             print(f'File has been removed: {file}')
-
-         for file in changed_files:
-             print(f'File has been updated: {file}')
-
-         return list(added_files) + list(changed_files)
-
-     # Check if CachedRVC.tar.gz exists
-     if ForceTemporaryStorage:
-         file_path = '/content/CachedRVC.tar.gz'
-     else:
-         file_path = '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz'
-
-     content_file_path = '/content/CachedRVC.tar.gz'
-     extract_path = '/'
-
-     if not os.path.exists(file_path):
-         folder_path = os.path.dirname(file_path)
-         os.makedirs(folder_path, exist_ok=True)
-         print('No cached dependency install found. Attempting to download GitHub backup...')
-
-         try:
-             download_url = "https://github.com/kalomaze/QuickMangioFixes/releases/download/release3/CachedRVC.tar.gz"
-             subprocess.run(["wget", "-O", file_path, download_url])
-             print('Download completed successfully!')
-         except Exception as e:
-             print('Download failed:', str(e))
-
-             # Delete the failed download file
-             if os.path.exists(file_path):
-                 os.remove(file_path)
-             print('Failed download file deleted. Continuing manual backup...')
-
-     if Path(file_path).exists():
-         if ForceTemporaryStorage:
-             print('Finished downloading CachedRVC.tar.gz.')
-         else:
-             print('CachedRVC.tar.gz found on Google Drive. Proceeding to copy and extract...')
-
-         # Skip the copy from Drive when ForceTemporaryStorage is set
-         if ForceTemporaryStorage:
-             pass
-         else:
-             shutil.copy(file_path, content_file_path)
-
-         print('Beginning backup copy operation...')
-
-         with tarfile.open(content_file_path, 'r:gz') as tar:
-             for member in tar.getmembers():
-                 target_path = os.path.join(extract_path, member.name)
-                 try:
-                     tar.extract(member, extract_path)
-                 except Exception as e:
-                     print('Failed to extract a file (this isn\'t normal)... forcing an update to compensate')
-                     ForceUpdateDependencies = True
-         print(f'Extraction of {content_file_path} to {extract_path} completed.')
-
-         if ForceUpdateDependencies:
-             install_packages()
-             ForceUpdateDependencies = False
-     else:
-         print('CachedRVC.tar.gz not found. Proceeding to create an index of all current files...')
-         scan_and_write('/usr/', '/content/usr_files.csv')
-
-         install_packages()
-
-         scan_and_write('/usr/', '/content/usr_files_new.csv')
-         changed_files = compare_files('/content/usr_files.csv', '/content/usr_files_new.csv')
-
-         with tarfile.open('/content/CachedRVC.tar.gz', 'w:gz') as new_tar:
-             for file in changed_files:
-                 new_tar.add(file)
-                 print(f'Added to tar: {file}')
-
-         os.makedirs('/content/drive/MyDrive/RVC_Cached', exist_ok=True)
-         shutil.copy('/content/CachedRVC.tar.gz', '/content/drive/MyDrive/RVC_Cached/CachedRVC.tar.gz')
-         print('Updated CachedRVC.tar.gz copied to Google Drive.')
-         print('Dependencies fully up to date; future runs should be faster.')
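A hypothetical Colab cell driving this module (the flag names are the module's own; the import assumes the Applio repo root is on sys.path):

```python
from utils.dependency import setup_environment

# First run builds and caches the dependency archive; later runs restore it from Drive.
setup_environment(ForceUpdateDependencies=False, ForceTemporaryStorage=False)
```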
spaces/Enterprisium/Easy_GUI/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py DELETED
@@ -1,86 +0,0 @@
- from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
- import pyworld
- import numpy as np
-
-
- class HarvestF0Predictor(F0Predictor):
-     def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
-         self.hop_length = hop_length
-         self.f0_min = f0_min
-         self.f0_max = f0_max
-         self.sampling_rate = sampling_rate
-
-     def interpolate_f0(self, f0):
-         """
-         Interpolate the F0 contour across unvoiced frames.
-         """
-
-         data = np.reshape(f0, (f0.size, 1))
-
-         vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
-         vuv_vector[data > 0.0] = 1.0
-         vuv_vector[data <= 0.0] = 0.0
-
-         ip_data = data
-
-         frame_number = data.size
-         last_value = 0.0
-         for i in range(frame_number):
-             if data[i] <= 0.0:
-                 j = i + 1
-                 for j in range(i + 1, frame_number):
-                     if data[j] > 0.0:
-                         break
-                 if j < frame_number - 1:
-                     if last_value > 0.0:
-                         step = (data[j] - data[i - 1]) / float(j - i)
-                         for k in range(i, j):
-                             ip_data[k] = data[i - 1] + step * (k - i + 1)
-                     else:
-                         for k in range(i, j):
-                             ip_data[k] = data[j]
-                 else:
-                     for k in range(i, frame_number):
-                         ip_data[k] = last_value
-             else:
-                 ip_data[i] = data[i]  # possibly an unnecessary copy here
-                 last_value = data[i]
-
-         return ip_data[:, 0], vuv_vector[:, 0]
-
-     def resize_f0(self, x, target_len):
-         source = np.array(x)
-         source[source < 0.001] = np.nan
-         target = np.interp(
-             np.arange(0, len(source) * target_len, len(source)) / target_len,
-             np.arange(0, len(source)),
-             source,
-         )
-         res = np.nan_to_num(target)
-         return res
-
-     def compute_f0(self, wav, p_len=None):
-         if p_len is None:
-             p_len = wav.shape[0] // self.hop_length
-         f0, t = pyworld.harvest(
-             wav.astype(np.double),
-             fs=self.sampling_rate,
-             f0_ceil=self.f0_max,
-             f0_floor=self.f0_min,
-             frame_period=1000 * self.hop_length / self.sampling_rate,
-         )
-         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
-         return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
-
-     def compute_f0_uv(self, wav, p_len=None):
-         if p_len is None:
-             p_len = wav.shape[0] // self.hop_length
-         f0, t = pyworld.harvest(
-             wav.astype(np.double),
-             fs=self.sampling_rate,
-             f0_floor=self.f0_min,
-             f0_ceil=self.f0_max,
-             frame_period=1000 * self.hop_length / self.sampling_rate,
-         )
-         f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
-         return self.interpolate_f0(self.resize_f0(f0, p_len))
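A hypothetical usage sketch (requires pyworld and numpy; the 16 kHz rate, hop size, and test tone are illustrative, not from the repo):

```python
import numpy as np

sr, hop = 16000, 160
t = np.arange(sr) / sr
wav = 0.5 * np.sin(2 * np.pi * 440.0 * t)  # one second of a 440 Hz tone

predictor = HarvestF0Predictor(hop_length=hop, sampling_rate=sr)
f0, vuv = predictor.compute_f0_uv(wav)          # frame-level pitch and voiced flags
print(f0.shape, float(np.median(f0[vuv > 0])))  # median should be close to 440.0
```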
spaces/EsoCode/text-generation-webui/extensions/multimodal/pipelines/llava/llava.py DELETED
@@ -1,145 +0,0 @@
- import time
- from abc import abstractmethod
- from typing import List, Tuple
-
- import torch
- from huggingface_hub import hf_hub_download
- from PIL import Image
- from transformers import CLIPImageProcessor, CLIPVisionModel
-
- from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline
- from modules import shared
- from modules.logging_colors import logger
- from modules.text_generation import encode
-
-
- class LLaVA_v0_Pipeline(AbstractMultimodalPipeline):
-     CLIP_REPO = "openai/clip-vit-large-patch14"
-
-     def __init__(self, params: dict) -> None:
-         super().__init__()
-         self.clip_device = self._get_device("vision_device", params)
-         self.clip_dtype = self._get_dtype("vision_bits", params)
-         self.projector_device = self._get_device("projector_device", params)
-         self.projector_dtype = self._get_dtype("projector_bits", params)
-         self.image_processor, self.vision_tower, self.mm_projector = self._load_models()
-
-     def _load_models(self):
-         start_ts = time.time()
-
-         logger.info(f"LLaVA - Loading CLIP from {LLaVA_v0_Pipeline.CLIP_REPO} as {self.clip_dtype} on {self.clip_device}...")
-         image_processor = CLIPImageProcessor.from_pretrained(LLaVA_v0_Pipeline.CLIP_REPO, torch_dtype=self.clip_dtype)
-         vision_tower = CLIPVisionModel.from_pretrained(LLaVA_v0_Pipeline.CLIP_REPO, torch_dtype=self.clip_dtype).to(self.clip_device)
-
-         logger.info(f"LLaVA - Loading projector from {self.llava_projector_repo()} as {self.projector_dtype} on {self.projector_device}...")
-         projector_path = hf_hub_download(self.llava_projector_repo(), self.llava_projector_filename())
-         mm_projector = torch.nn.Linear(*self.llava_projector_shape())
-         projector_data = torch.load(projector_path)
-         mm_projector.weight = torch.nn.Parameter(projector_data['model.mm_projector.weight'].to(dtype=self.projector_dtype), False)
-         mm_projector.bias = torch.nn.Parameter(projector_data['model.mm_projector.bias'].to(dtype=self.projector_dtype), False)
-         mm_projector = mm_projector.to(self.projector_device)
-
-         logger.info(f"LLaVA supporting models loaded, took {time.time() - start_ts:.2f} seconds")
-         return image_processor, vision_tower, mm_projector
-
-     @staticmethod
-     def image_start() -> str:
-         return "<im_start>"
-
-     @staticmethod
-     def image_end() -> str:
-         return "<im_end>"
-
-     @staticmethod
-     def num_image_embeds() -> int:
-         return 256
-
-     @staticmethod
-     def embed_tokens(input_ids: torch.Tensor) -> torch.Tensor:
-         if hasattr(shared.model.model, 'embed_tokens'):
-             func = shared.model.model.embed_tokens
-         else:
-             func = shared.model.model.model.embed_tokens  # AutoGPTQ case
-
-         return func(input_ids).to(shared.model.device, dtype=shared.model.dtype)
-
-     @staticmethod
-     def placeholder_embeddings() -> torch.Tensor:
-         return LLaVA_v0_Pipeline.embed_tokens(encode("<im_patch>"*256, add_bos_token=False)[0])
-
-     def embed_images(self, images: List[Image.Image]) -> torch.Tensor:
-         images = self.image_processor(images, return_tensors='pt')['pixel_values']
-         images = images.to(self.clip_device, dtype=self.clip_dtype)
-
-         with torch.no_grad():
-             image_forward_outs = self.vision_tower(images, output_hidden_states=True)
-             select_hidden_state_layer = -2
-             select_hidden_state = image_forward_outs.hidden_states[select_hidden_state_layer]
-             image_features = select_hidden_state[:, 1:].to(self.projector_device, dtype=self.projector_dtype)
-             image_features = self.mm_projector(image_features)
-         return image_features.to(shared.model.device, dtype=shared.model.dtype)
-
-     @staticmethod
-     @abstractmethod
-     def llava_projector_repo() -> str:
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def llava_projector_filename() -> str:
-         pass
-
-     @staticmethod
-     @abstractmethod
-     def llava_projector_shape() -> Tuple[int, int]:
-         pass
-
-
- class LLaVA_v0_13B_Pipeline(LLaVA_v0_Pipeline):
-     def __init__(self, params: dict) -> None:
-         super().__init__(params)
-
-     @staticmethod
-     def name() -> str:
-         return "llava-13b"
-
-     @staticmethod
-     def placeholder_token_id() -> int:
-         return 32000
-
-     @staticmethod
-     def llava_projector_shape() -> Tuple[int, int]:
-         return (1024, 5120)
-
-     @staticmethod
-     def llava_projector_filename() -> str:
-         return "mm_projector.bin"
-
-     @staticmethod
-     def llava_projector_repo() -> str:
-         return "liuhaotian/LLaVA-13b-delta-v0"
-
-
- class LLaVA_v0_7B_Pipeline(LLaVA_v0_Pipeline):
-     def __init__(self, params: dict) -> None:
-         super().__init__(params)
-
-     @staticmethod
-     def name() -> str:
-         return "llava-7b"
-
-     @staticmethod
-     def placeholder_token_id() -> int:
-         return 32001
-
-     @staticmethod
-     def llava_projector_shape() -> Tuple[int, int]:
-         return (1024, 4096)
-
-     @staticmethod
-     def llava_projector_filename() -> str:
-         return "mm_projector.bin"
-
-     @staticmethod
-     def llava_projector_repo() -> str:
-         return "liuhaotian/LLaVA-7b-delta-v0"
spaces/FKBaffour/Expresso_Customer_Churn_Prediction/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Expresso Customer Churn Prediction
- emoji: 💩
- colorFrom: red
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.17.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference